function_name (string) | docstring (string) | masked_code (string) | implementation (string) | start_line (int32) | end_line (int32) | file_content (string)
---|---|---|---|---|---|---|
reorder_categories
|
Reorders categories as specified in new_categories.
`new_categories` needs to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or if they
include any new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
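A brief usage sketch (added for illustration, not part of the original docstring; the repr output follows the pandas conventions shown elsewhere in this file and may vary slightly between versions):
>>> c = pd.Categorical(['a', 'b', 'a'])
>>> c.reorder_categories(['b', 'a'], ordered=True)
[a, b, a]
Categories (2, object): [b < a]
Passing a list that omits an existing category or introduces a new one raises ValueError, as described above.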
|
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
# results depending on whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if isinstance(other, ABCSeries):
return NotImplemented
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
# Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
# In other Series, this leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# With cat[0], for example, being ``np.int64(1)`` by the time it gets
# into this function would become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :meth:`__contains__`
and :meth:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
_codes_doc = """The category codes of this categorical.
Level codes are an array of integers which are the positions of the real
values in the categories array.
There is no setter; use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
`Categoricals` can take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
# For comparisons, so that numpy uses our implementation of the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
# Ways of specifying the dtype (in order of priority)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
number of new categories is unequal to the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _ndarray_values(self):
return self.codes
@property
def _constructor(self):
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
# Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
so do not need the (computationally intensive) factorization step, which is
usually done in the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered
categorical. If not given, the resulting categorical will be
unordered.
"""
codes = np.asarray(codes) # #21767
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
try:
codes = coerce_indexer_dtype(codes, categories)
except (ValueError, TypeError):
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype.validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
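# Illustrative sketch (added for this write-up, not part of the original
# source): building a Categorical directly from integer codes, assuming the
# semantics documented in `from_codes` above; -1 marks a missing value.
#
# >>> pd.Categorical.from_codes([0, 1, 0, -1], categories=['a', 'b'])
# [a, b, a, NaN]
# Categories (2, object): [a, b]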
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
set to NaN). If `rename==True`, the categories will simply be renamed
(fewer or more items than in the old categories will result in values set to
NaN or in unused categories, respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
On the other hand, this method does not do checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprising changes, for example when using special string
dtypes on python3, which do not consider an S1 string equal to a
single-char python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
self._codes[self._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
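# Illustrative sketch (added for this write-up, not part of the original
# source): `set_categories` can add, drop and reorder in a single call,
# assuming the semantics documented above; values whose category is dropped
# become NaN.
#
# >>> c = pd.Categorical(['a', 'b', 'a'])
# >>> c.set_categories(['b', 'c'])
# [NaN, b, NaN]
# Categories (2, object): [b, c]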
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
items as the current categories or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list-like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
# MASKED: reorder_categories function (lines 945-985)
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
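# Illustrative sketch (added for this write-up, not part of the original
# source), assuming the semantics documented above: the new category is
# appended after the existing ones and is initially unused.
#
# >>> c = pd.Categorical(['a', 'b', 'a'])
# >>> c.add_categories(['c'])
# [a, b, a]
# Categories (3, object): [a, b, c]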
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
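# Illustrative sketch (added for this write-up, not part of the original
# source), assuming the semantics documented above: values belonging to a
# removed category become NaN.
#
# >>> c = pd.Categorical(['a', 'b', 'a'])
# >>> c.remove_categories(['b'])
# [a, NaN, a]
# Categories (1, object): [a]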
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
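# Illustrative sketch (added for this write-up, not part of the original
# source), assuming the semantics documented above: categories with no
# observations are dropped, the values themselves are unchanged.
#
# >>> c = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
# >>> c.remove_unused_categories()
# [a, b]
# Categories (2, object): [a, b]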
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
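# Illustrative sketch (added for this write-up, not part of the original
# source), assuming the semantics documented above: vacated positions are
# filled with NaN (code -1) and the categories are preserved.
#
# >>> pd.Categorical(['a', 'b', 'c']).shift(1)
# [NaN, a, b]
# Categories (3, object): [a, b, c]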
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
# we need to ensure __array__ get's all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype.validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
# >=15.0 < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
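# Illustrative sketch (added for this write-up, not part of the original
# source), assuming the semantics documented above: every category gets an
# entry, including zero counts, indexed by a CategoricalIndex.
#
# >>> pd.Categorical(['a', 'a', 'b'], categories=['a', 'b', 'c']).value_counts()
# a    2
# b    1
# c    0
# dtype: int64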
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime and period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
def argsort(self, *args, **kwargs):
# TODO(PY2): use correct signature
# We have to do *args, **kwargs to avoid a py2-only signature
# issue since np.argsort differs from argsort.
"""Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
"""
# Keep the implementation here just for the docstring.
return super(Categorical, self).argsort(*args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, dtype=self.dtype,
fastpath=True)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
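# Illustrative sketch (added for this write-up, not part of the original
# source), assuming the semantics documented above: the fill value must
# already be one of the categories, otherwise ValueError is raised.
#
# >>> c = pd.Categorical(['a', None, 'b'])
# >>> c.fillna('a')
# [a, a, b]
# Categories (2, object): [a, b]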
def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
allow_fill : bool, default None.
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values
(the default). These values are set to `fill_value`. Any other
negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
if isna(fill_value):
# For categorical, any NA value is considered a user-facing
# NA value. Our storage NA value is -1.
fill_value = -1
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = self._constructor(codes, dtype=self.dtype, fastpath=True)
return result
take = take_nd
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None), ...)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
# replace " < ... < " with " ... " to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
If (one or more) value is not in categories or if an assigned
`Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None), ...)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
# There is a bug in numpy, which does not accept a Series as an
# indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Example
-------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
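        Examples
        --------
        A minimal illustrative sketch (unused categories are kept in the
        result):
        >>> pd.Categorical(['a', 'a', 'b']).mode()
        [a]
        Categories (2, object): [a, b]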
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def unique(self):
"""
        Return the ``Categorical`` whose ``categories`` and ``codes`` are
        unique. Unused categories are NOT returned.
        - unordered category: values and categories are sorted by appearance
          order.
        - ordered category: values are sorted by appearance order, categories
          keep their existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc'))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc'))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
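        Examples
        --------
        A minimal illustrative sketch; both the values and the categories
        have to match:
        >>> c = pd.Categorical(['a', 'b', 'a'])
        >>> c.equals(pd.Categorical(['a', 'b', 'a']))
        True
        >>> c.equals(pd.Categorical(['a', 'b', 'b']))
        False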
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
def is_dtype_equal(self, other):
"""
        Returns True if categoricals are the same dtype:
        same categories and same ordered attribute.
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
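        Examples
        --------
        A minimal illustrative sketch:
        >>> pd.Categorical(['a', 'b']).repeat(2)
        [a, a, b, b]
        Categories (2, object): [a, b]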
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
def _formatting_values(self):
return self
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
@delegate_names(delegate=Categorical,
accessors=["categories", "ordered"],
typ="property")
@delegate_names(delegate=Categorical,
accessors=["rename_categories", "reorder_categories",
"add_categories", "remove_categories",
"remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"],
typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
    Be aware that assigning to `categories` is an inplace operation, while all
    methods return new categorical data by default (but can be called with
    `inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self.index = data.index
self.name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self._parent.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, 'values', values)
categories = getattr(categories, 'values', categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
def _recode_for_categories(codes, old_categories, new_categories):
"""
    Convert a set of codes to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
is_iterator(list_like)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# from ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
|
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
        `new_categories` needs to include all old categories and no new category
        items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
        ordered : boolean, optional
           Whether or not the categorical is treated as an ordered categorical.
           If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
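        Examples
        --------
        A minimal illustrative sketch; the values are unchanged, only the
        category order (and optionally ``ordered``) changes:
        >>> c = pd.Categorical(['a', 'b', 'a'])
        >>> c.reorder_categories(['b', 'a'], ordered=True)
        [a, b, a]
        Categories (2, object): [b < a]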
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
| 945 | 985 |
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
# results depending whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if isinstance(other, ABCSeries):
return NotImplemented
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
            # Two Categoricals can only be compared if the categories are
            # the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
                # In other Series, this leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# With cat[0], for example, being ``np.int64(1)`` by the time it gets
# into this function would become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :method:`__contains__`
and :class:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
    cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
_codes_doc = """The category codes of this categorical.
Level codes are an array of integers which are the positions of the real
values in the categories array.
There is no setter; use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
    `Categoricals` can only take on a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean, (default False)
Whether or not this categorical is treated as a ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
# For comparisons, so that numpy uses our implementation if the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
# Ways of specifying the dtype (prioritized ordered)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
        Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
number of new categories is unequal the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _ndarray_values(self):
return self.codes
@property
def _constructor(self):
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
            # Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
so do not need the (computation intensive) factorization step, which is
usually done on the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
Whether or not this categorical is treated as a ordered
categorical. If not given, the resulting categorical will be
unordered.
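        Examples
        --------
        A minimal illustrative sketch; ``-1`` codes become missing values:
        >>> pd.Categorical.from_codes([0, 1, -1, 1], categories=['a', 'b'])
        [a, b, NaN, b]
        Categories (2, object): [a, b]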
"""
codes = np.asarray(codes) # #21767
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
try:
codes = coerce_indexer_dtype(codes, categories)
except (ValueError, TypeError):
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype.validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
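        Examples
        --------
        A minimal illustrative sketch:
        >>> pd.Categorical(['a', 'b']).as_ordered()
        [a, b]
        Categories (2, object): [a < b]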
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
        set to NaN). If `rename==True`, the categories will simply be renamed
(less or more items than in old categories will result in values set to
NaN or in unused categories respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
        On the other hand, this method does not do checks (e.g., whether the
        old categories are included in the new categories on a reorder), which
        can result in surprising changes, for example when using special string
        dtypes on python3, which does not consider an S1 string equal to a
        single-char python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, (default: False)
Whether or not the categorical is treated as a ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
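        Examples
        --------
        A minimal illustrative sketch; values whose category is dropped
        become NaN, while added categories show up as unused:
        >>> c = pd.Categorical(['a', 'b', 'a'])
        >>> c.set_categories(['b', 'c'])
        [NaN, b, NaN]
        Categories (2, object): [b, c]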
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
self._codes[self._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
            If new categories are list-like and do not have the same number of
            items as the current categories or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
        `new_categories` needs to include all old categories and no new category
        items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as a ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
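        Examples
        --------
        A minimal illustrative sketch; the new category is appended but
        unused:
        >>> pd.Categorical(['a', 'b']).add_categories(['c'])
        [a, b]
        Categories (3, object): [a, b, c]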
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
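        Examples
        --------
        A minimal illustrative sketch; values in removed categories become
        NaN:
        >>> c = pd.Categorical(['a', 'b', 'a'])
        >>> c.remove_categories(['a'])
        [NaN, b, NaN]
        Categories (1, object): [b]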
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
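        Examples
        --------
        A minimal illustrative sketch:
        >>> c = pd.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c'])
        >>> c.remove_unused_categories()
        [a, b, a]
        Categories (2, object): [a, b]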
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
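        Examples
        --------
        A minimal illustrative sketch; vacated positions are filled with NaN:
        >>> pd.Categorical(['a', 'b', 'c']).shift(1)
        [NaN, a, b]
        Categories (3, object): [a, b, c]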
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
            # we need to ensure __array__ gets all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype.validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
# >=15.0 < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
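        Examples
        --------
        A minimal illustrative sketch (assumes ``numpy`` is imported as
        ``np``):
        >>> pd.Categorical(['a', np.nan, 'b']).isna()
        array([False,  True, False])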
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
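        Examples
        --------
        A minimal illustrative sketch; unused categories get a count of zero:
        >>> c = pd.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c'])
        >>> c.value_counts().tolist()
        [2, 1, 0]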
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime and period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
def argsort(self, *args, **kwargs):
# TODO(PY2): use correct signature
        # We have to do *args, **kwargs to avoid a py2-only signature
# issue since np.argsort differs from argsort.
"""Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
"""
# Keep the implementation here just for the docstring.
return super(Categorical, self).argsort(*args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, dtype=self.dtype,
fastpath=True)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
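Examples
--------
A minimal illustrative sketch of filling with a scalar that is already
a category (output shown as pandas typically renders a Categorical):
>>> c = pd.Categorical(['a', 'b', np.nan], categories=['a', 'b'])
>>> c.fillna('a')
[a, b, a]
Categories (2, object): [a, b]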
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
allow_fill : bool, default None.
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values
(the default). These values are set to `fill_value`. Any other
negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
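Examples
--------
Illustrative sketch; with ``allow_fill=True`` a ``-1`` in the indexer
is treated as a missing value:
>>> c = pd.Categorical(['a', 'b', 'a'])
>>> c.take([0, -1], allow_fill=True)
[a, NaN]
Categories (2, object): [a, b]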
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
if isna(fill_value):
# For categorical, any NA value is considered a user-facing
# NA value. Our storage NA value is -1.
fill_value = -1
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = self._constructor(codes, dtype=self.dtype, fastpath=True)
return result
take = take_nd
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None), ...)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
# replace " < ... < " with " ... " in the middle to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
If (one or more) value is not in categories or if an assigned
`Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None), ...)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
# There is a bug in numpy, which does not accept a Series as an
# indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Example
-------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
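Examples
--------
A small sketch with an ordered categorical (unordered categoricals
raise ``TypeError``):
>>> pd.Categorical(['b', 'a', 'c'], ordered=True).min()
'a'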
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
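Examples
--------
A minimal sketch of the most frequent category (output shown in the
usual Categorical rendering):
>>> pd.Categorical(['a', 'b', 'a']).mode()
[a]
Categories (2, object): [a, b]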
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def unique(self):
"""
Return the ``Categorical`` whose ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order, categories
keep their existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc'))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc'))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
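Examples
--------
Illustrative sketch: equality requires the same values and compatible
categories:
>>> c = pd.Categorical(['a', 'b', 'a'])
>>> c.equals(pd.Categorical(['a', 'b', 'a']))
True
>>> c.equals(pd.Categorical(['a', 'b', 'b']))
False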
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
def is_dtype_equal(self, other):
"""
Returns True if categoricals are the same dtype
(same categories and same ordered attribute).
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
def _formatting_values(self):
return self
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
@delegate_names(delegate=Categorical,
accessors=["categories", "ordered"],
typ="property")
@delegate_names(delegate=Categorical,
accessors=["rename_categories", "reorder_categories",
"add_categories", "remove_categories",
"remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"],
typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
Be aware that assigning to `categories` is an inplace operation, while all
methods return new categorical data by default (but can be called with
`inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self.index = data.index
self.name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self._parent.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, 'values', values)
categories = getattr(categories, 'values', categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
def _recode_for_categories(codes, old_categories, new_categories):
"""
Convert a set of codes from one set of categories to a new one
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
is_iterator(list_like)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# from ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
|
add_categories
|
Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
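Examples
--------
A minimal illustrative sketch; the added category is unused until
values are assigned to it:
>>> c = pd.Categorical(['a', 'b'])
>>> c.add_categories(['c'])
[a, b]
Categories (3, object): [a, b, c]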
|
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
# results depending whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if isinstance(other, ABCSeries):
return NotImplemented
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
# Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
# In other Series, this leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# With cat[0], for example, being ``np.int64(1)`` by the time it gets
# into this function would become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :meth:`__contains__`
and :meth:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
_codes_doc = """The category codes of this categorical.
Level codes are an array of integers which are the positions of the real
values in the categories array.
There is no setter; use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion.
`Categoricals` can only take on a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
# For comparisons, so that numpy uses our implementation of the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
# Ways of specifying the dtype (prioritized ordered)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
number of new categories is unequal to the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
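Examples
--------
Illustrative sketch of renaming by assignment (same number of
categories required):
>>> c = pd.Categorical(['a', 'b', 'a'])
>>> c.categories = ['x', 'y']
>>> c
[x, y, x]
Categories (2, object): [x, y]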
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _ndarray_values(self):
return self.codes
@property
def _constructor(self):
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
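Examples
--------
A minimal sketch of converting to a plain object ndarray:
>>> pd.Categorical(['a', 'b']).astype(object)
array(['a', 'b'], dtype=object)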
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
# Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
so do not need the (computation intensive) factorization step, which is
usually done on the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered
categorical. If not given, the resulting categorical will be
unordered.
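Examples
--------
A quick illustrative sketch; ``-1`` in ``codes`` marks a missing value:
>>> pd.Categorical.from_codes([0, 1, -1, 0], categories=['a', 'b'])
[a, b, NaN, a]
Categories (2, object): [a, b]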
"""
codes = np.asarray(codes) # #21767
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
try:
codes = coerce_indexer_dtype(codes, categories)
except (ValueError, TypeError):
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype.validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non-writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
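Examples
--------
Illustrative sketch (the returned copy compares categories as ordered):
>>> pd.Categorical(['a', 'b'], categories=['a', 'b']).as_ordered()
[a, b]
Categories (2, object): [a < b]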
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
set to NaN). If `rename==True`, the categories will simply be renamed
(less or more items than in old categories will result in values set to
NaN or in unused categories respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
On the other hand this method does not do any checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprising changes, for example when using special string
dtypes on python3, which do not consider an S1 string equal to a
single-character python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
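Examples
--------
A sketch of adding and dropping categories in one call; values outside
the new categories become NaN:
>>> c = pd.Categorical(['a', 'b', 'c'])
>>> c.set_categories(['a', 'b', 'd'])
[a, b, NaN]
Categories (3, object): [a, b, d]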
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
self._codes[self._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
items as the current categories or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list-like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
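Examples
--------
A minimal illustrative sketch; the values are unchanged, only the
category order changes:
>>> c = pd.Categorical(['a', 'b', 'a'])
>>> c.reorder_categories(['b', 'a'])
[a, b, a]
Categories (2, object): [b, a]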
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
# MASKED: add_categories function (lines 987-1034)
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
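Examples
--------
A small sketch; values in removed categories become NaN:
>>> c = pd.Categorical(['a', 'b', 'a'])
>>> c.remove_categories(['b'])
[a, NaN, a]
Categories (1, object): [a]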
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
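Examples
--------
Illustrative sketch:
>>> c = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
>>> c.remove_unused_categories()
[a, b]
Categories (2, object): [a, b]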
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
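Examples
--------
A minimal sketch; vacated positions become NaN:
>>> pd.Categorical(['a', 'b', 'c']).shift(1)
[NaN, a, b]
Categories (3, object): [a, b, c]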
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
# we need to ensure __array__ gets all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype.validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
# >= 0.15.0, < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
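# Illustrative usage sketch (editor's addition): the reported size is the
# codes' nbytes plus the categories' memory usage, so the exact number is
# platform dependent.
# >>> pd.Categorical(['a', 'b', 'a']).memory_usage()  # doctest: +SKIP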
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
"""
ret = self._codes == -1
return ret
isnull = isna
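# Illustrative usage sketch (editor's addition, doctest-style): only the
# -1 codes (missing values) are flagged.
# >>> pd.Categorical(['a', 'b', np.nan]).isna()
# array([False, False,  True])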
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
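# Illustrative usage sketch (editor's addition, doctest-style): rows whose
# code is -1 are dropped; the categories are kept as-is.
# >>> pd.Categorical(['a', np.nan, 'b']).dropna()
# [a, b]
# Categories (2, object): [a, b]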
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
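# Illustrative usage sketch (editor's addition, doctest-style): unused
# categories still get an entry with a count of 0.
# >>> pd.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c']).value_counts()
# a    2
# b    1
# c    0
# dtype: int64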
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime and period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
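# Illustrative usage sketch (editor's addition, doctest-style): for
# non-datetimelike categories this is simply the ndarray of values.
# >>> pd.Categorical(['a', 'b']).get_values()
# array(['a', 'b'], dtype=object)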
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
def argsort(self, *args, **kwargs):
# TODO(PY2): use correct signature
# We have to do *args, **kwargs to avoid a py2-only signature
# issue since np.argsort differs from argsort.
"""Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
"""
# Keep the implementation here just for the docstring.
return super(Categorical, self).argsort(*args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, dtype=self.dtype,
fastpath=True)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
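# Illustrative usage sketch (editor's addition, doctest-style): the fill
# value has to be an existing category (or NaN).
# >>> pd.Categorical(['a', np.nan, 'b'], categories=['a', 'b']).fillna('a')
# [a, a, b]
# Categories (2, object): [a, b]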
def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
allow_fill : bool, default None.
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values
(the default). These values are set to `fill_value`. Any other
negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
if isna(fill_value):
# For categorical, any NA value is considered a user-facing
# NA value. Our storage NA value is -1.
fill_value = -1
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = self._constructor(codes, dtype=self.dtype, fastpath=True)
return result
take = take_nd
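# Illustrative usage sketch (editor's addition, doctest-style): with
# allow_fill=True a -1 in the indexer yields a missing value.
# >>> pd.Categorical(['a', 'b', 'c']).take([0, -1], allow_fill=True)
# [a, NaN]
# Categories (3, object): [a, b, c]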
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None),....)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
# collapse " < ... < " to " ... " to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
If (one or more) value is not in categories or if an assigned
`Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None),....)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
# There is a bug in numpy, which does not accept a Series as an
# indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Example
-------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
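# Illustrative usage sketch (editor's addition, doctest-style): min/max are
# defined by the category order and require an ordered Categorical.
# >>> c = pd.Categorical(['a', 'b', 'c'], ordered=True)
# >>> c.min(), c.max()
# ('a', 'c')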
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
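# Illustrative usage sketch (editor's addition, doctest-style): the result
# is itself a Categorical that keeps the full set of categories.
# >>> pd.Categorical(['a', 'a', 'b']).mode()
# [a]
# Categories (2, object): [a, b]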
def unique(self):
"""
Return the ``Categorical`` whose ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order, categories
keep their existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc'))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc'))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
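# Illustrative usage sketch (editor's addition, doctest-style): equality
# requires the same dtype (categories and ordered) and the same values.
# >>> pd.Categorical(['a', 'b']).equals(pd.Categorical(['a', 'b']))
# True
# >>> pd.Categorical(['a', 'b']).equals(pd.Categorical(['a', 'b'], ordered=True))
# False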
def is_dtype_equal(self, other):
"""
Returns True if categoricals are the same dtype:
same categories, and same ordered
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
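# Illustrative usage sketch (editor's addition, doctest-style): only the
# codes are repeated; the categories are left untouched.
# >>> pd.Categorical(['a', 'b']).repeat(2)
# [a, a, b, b]
# Categories (2, object): [a, b]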
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
def _formatting_values(self):
return self
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
@delegate_names(delegate=Categorical,
accessors=["categories", "ordered"],
typ="property")
@delegate_names(delegate=Categorical,
accessors=["rename_categories", "reorder_categories",
"add_categories", "remove_categories",
"remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"],
typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
Be aware that assigning to `categories` is an inplace operation, while all
methods return new categorical data per default (but can be called with
`inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self.index = data.index
self.name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self._parent.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, 'values', values)
categories = getattr(categories, 'values', categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
def _recode_for_categories(codes, old_categories, new_categories):
"""
Convert a set of codes to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
is_iterator(list_like)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# from ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
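# Illustrative usage sketch (editor's addition, doctest-style): the new
# categories are appended and start out unused.
# >>> pd.Categorical(['a', 'b']).add_categories(['c'])
# [a, b]
# Categories (3, object): [a, b, c]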
| 987 | 1,034 |
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
# results depending whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if isinstance(other, ABCSeries):
return NotImplemented
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
# Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
# In other series, this leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# With cat[0], for example, being ``np.int64(1)`` by the time it gets
# into this function would become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :meth:`__contains__`
and :class:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
_codes_doc = """The category codes of this categorical.
Level codes are an array of integers which are the positions of the real
values in the categories array.
There is no setter; use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
`Categoricals` can only take on a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
# For comparisons, so that numpy uses our implementation of the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
# Ways of specifying the dtype (prioritized ordered)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
number of new categories is unequal to the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _ndarray_values(self):
return self.codes
@property
def _constructor(self):
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
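# Illustrative usage sketch (editor's addition, doctest-style): casting to a
# non-categorical dtype densifies the values into an ndarray.
# >>> pd.Categorical(['a', 'b']).astype(object)
# array(['a', 'b'], dtype=object)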
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
# Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
so do not need the (computationally intensive) factorization step, which is
usually done on the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered
categorical. If not given, the resulting categorical will be
unordered.
"""
codes = np.asarray(codes) # #21767
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
try:
codes = coerce_indexer_dtype(codes, categories)
except (ValueError, TypeError):
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype.validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
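# Illustrative usage sketch (editor's addition, doctest-style): codes index
# into the given categories, with -1 standing for NaN.
# >>> pd.Categorical.from_codes([0, 1, 0], categories=['a', 'b'])
# [a, b, a]
# Categories (2, object): [a, b]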
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
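# Illustrative usage sketch (editor's addition, doctest-style): only the
# `ordered` flag changes; values and categories stay the same.
# >>> pd.Categorical(['a', 'b']).as_ordered()
# [a, b]
# Categories (2, object): [a < b]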
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
set to NaN). If `rename==True`, the categories will simply be renamed
(fewer or more items than in the old categories will result in values set
to NaN or in unused categories respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
On the other hand this method does not do checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprising changes, for example when using special string
dtypes on python3, which do not consider an S1 string equal to a
single-char python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, (default: False)
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
cat._codes[cat._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
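# Sketch of set_categories usage (assumes standard pandas behavior; not from
# the original source). Categories missing from the new list turn the
# corresponding values into NaN, while extra categories remain unused:
#
#   >>> c = pd.Categorical(['a', 'b', 'a'])
#   >>> c.set_categories(['b', 'c'])
#   [NaN, b, NaN]
#   Categories (2, object): [b, c]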
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
items as the current categories or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list-like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` needs to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
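# Sketch of reorder_categories (assumes standard pandas behavior; not from the
# original source). The new order must be a permutation of the old categories:
#
#   >>> c = pd.Categorical(['a', 'b', 'a'])
#   >>> c.reorder_categories(['b', 'a'], ordered=True)
#   [a, b, a]
#   Categories (2, object): [b < a]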
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
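# Sketch of add_categories (assumes standard pandas behavior; not from the
# original source). The added category is appended and initially unused:
#
#   >>> c = pd.Categorical(['a', 'b', 'a'])
#   >>> c.add_categories(['c'])
#   [a, b, a]
#   Categories (3, object): [a, b, c]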
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
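# Sketch of remove_categories (assumes standard pandas behavior; not from the
# original source). Values in the removed categories become NaN:
#
#   >>> c = pd.Categorical(['a', 'b', 'a'])
#   >>> c.remove_categories(['a'])
#   [NaN, b, NaN]
#   Categories (1, object): [b]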
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
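# Sketch of remove_unused_categories (assumes standard pandas behavior; not
# from the original source):
#
#   >>> c = pd.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c'])
#   >>> c.remove_unused_categories()
#   [a, b, a]
#   Categories (2, object): [a, b]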
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
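# Sketch of shift (assumes standard pandas behavior; not from the original
# source). Positions vacated by the shift are filled with NaN:
#
#   >>> c = pd.Categorical(['a', 'b', 'c'])
#   >>> c.shift(1)
#   [NaN, a, b]
#   Categories (3, object): [a, b, c]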
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
# we need to ensure __array__ gets all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype.validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
# >= 0.15.0, < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
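# Sketch of value_counts (assumes standard pandas behavior; not from the
# original source). Unused categories appear with a count of 0:
#
#   >>> c = pd.Categorical(['a', 'a', 'b'], categories=['a', 'b', 'c'])
#   >>> c.value_counts()
#   a    2
#   b    1
#   c    0
#   dtype: int64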
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime and period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
def argsort(self, *args, **kwargs):
# TODO(PY2): use correct signature
# We have to do *args, **kwargs to avoid a py2-only signature
# issue since np.argsort differs from argsort.
"""Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
"""
# Keep the implementation here just for the docstring.
return super(Categorical, self).argsort(*args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, dtype=self.dtype,
fastpath=True)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
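# Sketch of fillna (assumes standard pandas behavior; not from the original
# source). The fill value must already be one of the categories:
#
#   >>> c = pd.Categorical(['a', None, 'b'], categories=['a', 'b'])
#   >>> c.fillna('a')
#   [a, a, b]
#   Categories (2, object): [a, b]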
def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
allow_fill : bool, default None.
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values
(the default). These values are set to `fill_value`. Any other
negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
if isna(fill_value):
# For categorical, any NA value is considered a user-facing
# NA value. Our storage NA value is -1.
fill_value = -1
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = self._constructor(codes, dtype=self.dtype, fastpath=True)
return result
take = take_nd
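# Sketch of take with explicit allow_fill (assumes standard pandas behavior;
# not from the original source). With allow_fill=True, -1 marks missing:
#
#   >>> c = pd.Categorical(['a', 'b', 'a'])
#   >>> c.take([0, -1], allow_fill=True)
#   [a, NaN]
#   Categories (2, object): [a, b]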
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None), ...)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
# replace " < ... < " with " ... " to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
If (one or more) value is not in categories or if an assigned
`Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None), ...)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
# There is a bug in numpy, which does not accept a Series as an
# indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Example
-------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
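# Sketch of min/max on an ordered categorical (assumes standard pandas
# behavior; not from the original source):
#
#   >>> c = pd.Categorical(['b', 'a', 'c'], ordered=True)
#   >>> c.min(), c.max()
#   ('a', 'c')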
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
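# Sketch of mode (assumes standard pandas behavior; not from the original
# source). The result is a Categorical keeping the original categories:
#
#   >>> c = pd.Categorical(['a', 'a', 'b'])
#   >>> c.mode()
#   [a]
#   Categories (2, object): [a, b]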
def unique(self):
"""
Return the ``Categorical`` whose ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order, categories
keep the existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc'))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc'))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
def is_dtype_equal(self, other):
"""
Returns True if categoricals are the same dtype:
same categories and same ordered.
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
def _formatting_values(self):
return self
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
@delegate_names(delegate=Categorical,
accessors=["categories", "ordered"],
typ="property")
@delegate_names(delegate=Categorical,
accessors=["rename_categories", "reorder_categories",
"add_categories", "remove_categories",
"remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"],
typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
Be aware that assigning to `categories` is an inplace operation, while all
methods return new categorical data by default (but can be called with
`inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self.index = data.index
self.name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self._parent.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, 'values', values)
categories = getattr(categories, 'values', categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
def _recode_for_categories(codes, old_categories, new_categories):
"""
Convert a set of codes to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
is_iterator(list_like)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# from ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
|
remove_categories
|
Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
|
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
# results depending whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if isinstance(other, ABCSeries):
return NotImplemented
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
# Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
# In other Series, this leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# With cat[0], for example, being ``np.int64(1)`` by the time it gets
# into this function would become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :meth:`Categorical.__contains__`
and :meth:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
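# Sketch of the contains() helper defined above (assumes the behavior
# described in its docstring; not from the original source). 'b' is a
# category but not present among the values, so membership is False:
#
#   >>> c = Categorical(['a'], categories=['a', 'b'])
#   >>> contains(c, 'a', container=c._codes)
#   True
#   >>> contains(c, 'b', container=c._codes)
#   False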
_codes_doc = """The category codes of this categorical.
Level codes are an array of integers which are the positions of the real
values in the categories array.
There is no setter; use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
`Categoricals` can take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
# For comparisons, so that numpy uses our implementation of the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
# Ways of specifying the dtype (in order of priority)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, dtype.categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
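    # Editor's note: an illustrative constructor sketch (added for clarity,
    # not part of the original source), assuming ``import pandas as pd``.
    # Output follows this era's Categorical repr.
    #
    #     >>> from pandas.api.types import CategoricalDtype
    #     >>> pd.Categorical(['a', 'b', 'a'],
    #     ...                dtype=CategoricalDtype(['b', 'a'], ordered=True))
    #     [a, b, a]
    #     Categories (2, object): [b < a]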
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
        Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
            number of new categories is unequal to the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _ndarray_values(self):
return self.codes
@property
def _constructor(self):
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
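    # Editor's note: a hedged usage sketch for ``astype`` (added for clarity,
    # not part of the original source), assuming ``import pandas as pd``.
    #
    #     >>> cat = pd.Categorical(['a', 'b', 'a'])
    #     >>> cat.astype(object)
    #     array(['a', 'b', 'a'], dtype=object)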
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
            # Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
        so do not need the (computationally intensive) factorization step,
        which is usually done during construction.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
            Whether or not this categorical is treated as an ordered
categorical. If not given, the resulting categorical will be
unordered.
"""
codes = np.asarray(codes) # #21767
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
try:
codes = coerce_indexer_dtype(codes, categories)
except (ValueError, TypeError):
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype.validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
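    # Editor's note: a hedged usage sketch for ``from_codes`` (added for
    # clarity, not part of the original source); -1 codes become NaN.
    # Assumes ``import pandas as pd``.
    #
    #     >>> pd.Categorical.from_codes([0, 1, -1, 0], categories=['a', 'b'])
    #     [a, b, NaN, a]
    #     Categories (2, object): [a, b]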
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
        set to NaN). If `rename==True`, the categories will simply be renamed
        (fewer or more items than in the old categories will result in values
        set to NaN or in unused categories, respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
        On the other hand, this method does not run checks (e.g., whether the
        old categories are included in the new categories on a reorder), which
        can result in surprising changes, for example when using special string
        dtypes on python3, which do not consider an S1 string equal to a
        single-character python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
        ordered : boolean, optional
           Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
self._codes[self._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
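    # Editor's note: a hedged usage sketch for ``set_categories`` (added for
    # clarity, not part of the original source), assuming ``import pandas as pd``.
    #
    #     >>> c = pd.Categorical(['a', 'b', 'a'])
    #     >>> c.set_categories(['b', 'a', 'c'])          # adds unused 'c'
    #     [a, b, a]
    #     Categories (3, object): [b, a, c]
    #     >>> c.set_categories(['x', 'y'], rename=True)  # positional rename
    #     [x, y, x]
    #     Categories (2, object): [x, y]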
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
            items as the current categories or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
           Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
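    # Editor's note: a hedged usage sketch for ``reorder_categories`` (added
    # for clarity, not part of the original source), assuming
    # ``import pandas as pd``.
    #
    #     >>> c = pd.Categorical(['a', 'b', 'a'])
    #     >>> c.reorder_categories(['b', 'a'], ordered=True)
    #     [a, b, a]
    #     Categories (2, object): [b < a]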
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
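    # Editor's note: a hedged usage sketch for ``add_categories`` (added for
    # clarity, not part of the original source); the new category starts out
    # unused. Assumes ``import pandas as pd``.
    #
    #     >>> c = pd.Categorical(['a', 'b'])
    #     >>> c.add_categories('c')
    #     [a, b]
    #     Categories (3, object): [a, b, c]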
# MASKED: remove_categories function (lines 1036-1086)
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
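    # Editor's note: a hedged usage sketch for ``remove_unused_categories``
    # (added for clarity, not part of the original source), assuming
    # ``import pandas as pd``.
    #
    #     >>> c = pd.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c'])
    #     >>> c.remove_unused_categories()
    #     [a, b, a]
    #     Categories (2, object): [a, b]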
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
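    # Editor's note: a hedged usage sketch for ``shift`` (added for clarity,
    # not part of the original source); vacated positions become NaN.
    # Assumes ``import pandas as pd``.
    #
    #     >>> c = pd.Categorical(['a', 'b', 'c'])
    #     >>> c.shift(1)
    #     [NaN, a, b]
    #     Categories (3, object): [a, b, c]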
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
            # we need to ensure __array__ gets all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype.validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
# >=15.0 < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
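    # Editor's note: a hedged usage sketch for ``value_counts`` (added for
    # clarity, not part of the original source); unused categories still get
    # a row with count 0. Assumes ``import pandas as pd``.
    #
    #     >>> pd.Categorical(['a', 'b', 'a'],
    #     ...                categories=['a', 'b', 'c']).value_counts()
    #     a    2
    #     b    1
    #     c    0
    #     dtype: int64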
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime and period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
def argsort(self, *args, **kwargs):
# TODO(PY2): use correct signature
        # We have to do *args, **kwargs to avoid a py2-only signature
# issue since np.argsort differs from argsort.
"""Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
"""
# Keep the implementation here just for the docstring.
return super(Categorical, self).argsort(*args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, dtype=self.dtype,
fastpath=True)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
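    # Editor's note: a hedged usage sketch for ``fillna`` (added for clarity,
    # not part of the original source); the fill value must already be one of
    # the categories. Assumes ``import pandas as pd``.
    #
    #     >>> c = pd.Categorical(['a', None, 'b'], categories=['a', 'b'])
    #     >>> c.fillna('a')
    #     [a, a, b]
    #     Categories (2, object): [a, b]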
def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
allow_fill : bool, default None.
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values
(the default). These values are set to `fill_value`. Any other
              negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
if isna(fill_value):
# For categorical, any NA value is considered a user-facing
# NA value. Our storage NA value is -1.
fill_value = -1
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = self._constructor(codes, dtype=self.dtype, fastpath=True)
return result
take = take_nd
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
        # in a 2-d case be passed (slice(None), ...)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
        # replace " < ... < " with " ... " to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
            If (one or more) value is not in categories or if an assigned
`Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
            # in a 2-d case be passed (slice(None), ...)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
            # There is a bug in numpy, which does not accept a Series as an
            # indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Example
-------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
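    # Editor's note: a hedged usage sketch for ``mode`` (added for clarity,
    # not part of the original source); the result is always a Categorical.
    # Assumes ``import pandas as pd``.
    #
    #     >>> pd.Categorical(['a', 'a', 'b']).mode()
    #     [a]
    #     Categories (2, object): [a, b]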
def unique(self):
"""
        Return the ``Categorical`` whose ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order, categories
          keep the existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc'))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc'))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
def is_dtype_equal(self, other):
"""
        Returns True if the categoricals have the same dtype,
        the same categories, and the same ordered attribute
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
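    # Editor's note: a hedged usage sketch for ``describe`` (added for
    # clarity, not part of the original source); column alignment below is
    # approximate. Assumes ``import pandas as pd``.
    #
    #     >>> pd.Categorical(['a', 'a', 'b']).describe()
    #                 counts     freqs
    #     categories
    #     a                2  0.666667
    #     b                1  0.333333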
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
def _formatting_values(self):
return self
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
@delegate_names(delegate=Categorical,
accessors=["categories", "ordered"],
typ="property")
@delegate_names(delegate=Categorical,
accessors=["rename_categories", "reorder_categories",
"add_categories", "remove_categories",
"remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"],
typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
    Be aware that assigning to `categories` is an inplace operation, while all
    methods return new categorical data by default (but can be called with
`inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self.index = data.index
self.name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self._parent.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, 'values', values)
categories = getattr(categories, 'values', categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
def _recode_for_categories(codes, old_categories, new_categories):
"""
    Convert a set of codes to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
is_iterator(list_like)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# from ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
|
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
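    # Editor's note: a hedged usage sketch for ``remove_categories`` (added
    # for clarity, not part of the original source); values in removed
    # categories become NaN. Assumes ``import pandas as pd``.
    #
    #     >>> c = pd.Categorical(['a', 'b', 'c'])
    #     >>> c.remove_categories(['b'])
    #     [a, NaN, c]
    #     Categories (2, object): [a, c]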
| 1,036 | 1,086 |
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
# results depending whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if isinstance(other, ABCSeries):
return NotImplemented
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
# Two Categoricals can only be be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
                # In other Series, this leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# With cat[0], for example, being ``np.int64(1)`` by the time it gets
# into this function would become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :method:`__contains__`
and :class:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
    cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
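
    Examples
    --------
    Illustrative sketch only (mirrors the inline example in the body below):

    >>> c = pd.Categorical(['a'], categories=['a', 'b'])
    >>> contains(c, 'b', c._codes)
    False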
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
_codes_doc = """The category codes of this categorical.
Level codes are an array of integers which are the positions of the real
values in the categories array.
There is no setter; use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
    `Categoricals` can take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean, (default False)
        Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
    # For comparisons, so that numpy uses our implementation of the comparison
    # ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
# Ways of specifying the dtype (prioritized ordered)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
        Assigning to `categories` is an inplace operation!
Raises
------
ValueError
            If the new categories do not validate as categories or if the
            number of new categories is not equal to the number of old
            categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _ndarray_values(self):
return self.codes
@property
def _constructor(self):
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
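
        Examples
        --------
        Illustrative sketch:

        >>> pd.Categorical(['a', 'b']).astype('object')
        array(['a', 'b'], dtype=object)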
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
            # Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
        so do not need the (computationally intensive) factorization step,
        which is usually done in the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
            Whether or not this categorical is treated as an ordered
categorical. If not given, the resulting categorical will be
unordered.
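
        Examples
        --------
        Illustrative sketch; -1 marks a missing value:

        >>> pd.Categorical.from_codes([0, 1, -1], categories=['a', 'b'])
        [a, b, NaN]
        Categories (2, object): [a, b]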
"""
codes = np.asarray(codes) # #21767
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
try:
codes = coerce_indexer_dtype(codes, categories)
except (ValueError, TypeError):
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype.validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
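
        Examples
        --------
        Illustrative sketch:

        >>> pd.Categorical(['a', 'b']).as_ordered()
        [a, b]
        Categories (2, object): [a < b]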
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
        set to NaN). If `rename==True`, the categories will simply be renamed
        (fewer or more items than in the old categories will result in values
        set to NaN or in unused categories respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
        On the other hand this method does not do checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprising changes, for example when using special string
        dtypes on python3, which does not consider an S1 string equal to a
single char python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
        ordered : boolean, optional
           Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
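
        Examples
        --------
        Illustrative sketch; dropped categories become NaN, added ones stay
        unused:

        >>> c = pd.Categorical(['a', 'b', 'a'])
        >>> c.set_categories(['b', 'c'])
        [NaN, b, NaN]
        Categories (2, object): [b, c]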
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
                cat._codes[cat._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
            items as the current categories or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Raises
------
ValueError
            If the new categories do not contain all old category items or if
            any new ones are included
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
           Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
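
        Examples
        --------
        Illustrative sketch:

        >>> c = pd.Categorical(['a', 'b', 'a'])
        >>> c.reorder_categories(['b', 'a'], ordered=True)
        [a, b, a]
        Categories (2, object): [b < a]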
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
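
        Examples
        --------
        Illustrative sketch; the new category is appended but unused:

        >>> pd.Categorical(['a', 'b']).add_categories('c')
        [a, b]
        Categories (3, object): [a, b, c]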
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
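
        Examples
        --------
        Illustrative sketch; values in removed categories become NaN:

        >>> pd.Categorical(['a', 'b', 'a']).remove_categories('b')
        [a, NaN, a]
        Categories (1, object): [a]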
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
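
        Examples
        --------
        Illustrative sketch:

        >>> c = pd.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c'])
        >>> c.remove_unused_categories()
        [a, b, a]
        Categories (2, object): [a, b]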
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
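
        Examples
        --------
        Illustrative sketch; vacated positions become NaN:

        >>> pd.Categorical(['a', 'b', 'c']).shift(1)
        [NaN, a, b]
        Categories (3, object): [a, b, c]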
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
            # we need to ensure __array__ gets all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype.validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
            # >= 0.15.0 < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
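
        Examples
        --------
        Illustrative sketch:

        >>> pd.Categorical(['a', np.nan, 'b']).dropna()
        [a, b]
        Categories (2, object): [a, b]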
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
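
        Examples
        --------
        Illustrative sketch; the unused category still gets a row:

        >>> c = pd.Categorical(['a', 'a', 'b'], categories=['a', 'b', 'c'])
        >>> c.value_counts()
        a    2
        b    1
        c    0
        dtype: int64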
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime and period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
def argsort(self, *args, **kwargs):
# TODO(PY2): use correct signature
        # We have to do *args, **kwargs to avoid a py2-only signature
# issue since np.argsort differs from argsort.
"""Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
"""
# Keep the implementation here just for the docstring.
return super(Categorical, self).argsort(*args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, dtype=self.dtype,
fastpath=True)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
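
        Examples
        --------
        Illustrative sketch; the fill value must already be a category:

        >>> c = pd.Categorical(['a', np.nan, 'b'], categories=['a', 'b'])
        >>> c.fillna('a')
        [a, a, b]
        Categories (2, object): [a, b]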
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
allow_fill : bool, default None.
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values
              (the default). These values are set to `fill_value`. Any other
              negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
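
        Examples
        --------
        Illustrative sketch with an explicit ``allow_fill``:

        >>> c = pd.Categorical(['a', 'b', 'c'])
        >>> c.take([0, -1, 1], allow_fill=True)
        [a, NaN, b]
        Categories (3, object): [a, b, c]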
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
if isna(fill_value):
# For categorical, any NA value is considered a user-facing
# NA value. Our storage NA value is -1.
fill_value = -1
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = self._constructor(codes, dtype=self.dtype, fastpath=True)
return result
take = take_nd
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
        # in a 2-d case be passed (slice(None),....)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
        # replace ' < ... < ' with ' ... ' to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
            If (one or more) value is not in categories or if an assigned
`Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
            # in a 2-d case be passed (slice(None),....)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
            # There is a bug in numpy, which does not accept a Series as an
# indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
        Examples
        --------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
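
        Examples
        --------
        Illustrative sketch (requires an ordered categorical):

        >>> pd.Categorical(['b', 'a', 'c'], ordered=True).min()
        'a'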
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
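
        Examples
        --------
        Illustrative sketch:

        >>> pd.Categorical(['a', 'b', 'b']).mode()
        [b]
        Categories (2, object): [a, b]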
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def unique(self):
"""
        Return the ``Categorical`` whose ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order, categories
          keep their existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc'))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc'))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
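    # Hedged sketch: for unordered categoricals, categories that form the same
    # set but in a different order still compare equal, because the codes of
    # `other` are recoded onto `self.categories` before the comparison.
    #
    # >>> c1 = pd.Categorical(['a', 'b', 'a'], categories=['a', 'b'])
    # >>> c2 = pd.Categorical(['a', 'b', 'a'], categories=['b', 'a'])
    # >>> c1.equals(c2)
    # True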
def is_dtype_equal(self, other):
"""
        Returns True if categoricals are the same dtype:
          same categories, and the same ordered attribute.
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
            A dataframe with counts and frequencies by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
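    # Hedged sketch (exact float formatting may differ): describe() returns a
    # DataFrame indexed by 'categories' with 'counts' and 'freqs' columns.
    #
    # >>> pd.Categorical(['a', 'b', 'a']).describe()
    #             counts     freqs
    # categories
    # a                2  0.666667
    # b                1  0.333333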
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
def _formatting_values(self):
return self
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
@delegate_names(delegate=Categorical,
accessors=["categories", "ordered"],
typ="property")
@delegate_names(delegate=Categorical,
accessors=["rename_categories", "reorder_categories",
"add_categories", "remove_categories",
"remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"],
typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
    Be aware that assigning to `categories` is an inplace operation, while all
    methods return new categorical data by default (but can be called with
    `inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self.index = data.index
self.name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self._parent.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, 'values', values)
categories = getattr(categories, 'values', categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
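# Hedged sketch (the dtype of the returned codes depends on the number of
# categories): values not found in `categories` map to -1.
#
# >>> _get_codes_for_values(np.array(['b', 'a', 'z'], dtype=object),
# ...                       pd.Index(['a', 'b']))
# array([ 1,  0, -1], dtype=int8)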
def _recode_for_categories(codes, old_categories, new_categories):
"""
    Convert a set of codes for `old_categories` into codes for `new_categories`
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
is_iterator(list_like)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# from ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
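# Hedged sketch: for a plain list-like the categories come back as a sorted
# Index and the codes point into it; for categorical input the existing
# categories and order are preserved.
#
# >>> codes, categories = _factorize_from_iterable(['b', 'a', 'b'])
# >>> codes
# array([1, 0, 1], dtype=int8)
# >>> categories
# Index(['a', 'b'], dtype='object')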
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
|
remove_unused_categories
|
Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
|
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
        # results depending on whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if isinstance(other, ABCSeries):
return NotImplemented
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
            # Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
                # In other Series, this leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
        # With cat[0], for example, being ``np.int64(1)``, by the time it
        # gets into this function it would become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
    This is a helper method for :meth:`__contains__`
    and :meth:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
    cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
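# Hedged usage sketch: membership is checked against `container` (typically
# the codes), so a declared-but-unused category is not "in" the Categorical.
#
# >>> c = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
# >>> contains(c, 'b', container=c._codes)
# True
# >>> contains(c, 'c', container=c._codes)
# False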
_codes_doc = """The category codes of this categorical.
Level codes are an array of integers which are the positions of the real
values in the categories array.
There is no setter; use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
    `Categoricals` can take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean, (default False)
        Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
    # For comparisons, so that numpy uses our implementation of the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
# Ways of specifying the dtype (prioritized ordered)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
        Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
            number of new categories is unequal to the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _ndarray_values(self):
return self.codes
@property
def _constructor(self):
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
            # Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
so do not need the (computation intensive) factorization step, which is
usually done on the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
            Whether or not this categorical is treated as an ordered
categorical. If not given, the resulting categorical will be
unordered.
"""
codes = np.asarray(codes) # #21767
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
try:
codes = coerce_indexer_dtype(codes, categories)
except (ValueError, TypeError):
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype.validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
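    # Hedged sketch (assumes the standard Categorical repr): codes index into
    # `categories`, with -1 marking missing values.
    #
    # >>> pd.Categorical.from_codes([0, 1, -1, 1], categories=['a', 'b'])
    # [a, b, NaN, b]
    # Categories (2, object): [a, b]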
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
        set to NaN). If `rename==True`, the categories will simply be renamed
        (fewer or more items than in the old categories will result in values
        set to NaN or in unused categories respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
        On the other hand this method does not do any checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprising changes, for example when using special string
        dtypes on python3, which do not consider an S1 string equal to a
        single-char python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, (default: False)
            Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
                cat._codes[cat._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
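    # Hedged sketch: without rename, values whose category is dropped become
    # NaN, while genuinely new categories simply end up unused.
    #
    # >>> c = pd.Categorical(['a', 'b', 'a'])
    # >>> c.set_categories(['b', 'c'])
    # [NaN, b, NaN]
    # Categories (2, object): [b, c]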
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
            items as the current categories or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
            Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
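    # Hedged sketch: removing a category turns its values into NaN rather
    # than dropping them.
    #
    # >>> c = pd.Categorical(['a', 'b', 'a'])
    # >>> c.remove_categories(['a'])
    # [NaN, b, NaN]
    # Categories (1, object): [b]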
# MASKED: remove_unused_categories function (lines 1088-1123)
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
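    # Hedged sketch: shifting fills the vacated positions with NaN while
    # keeping the categories and ordering intact.
    #
    # >>> c = pd.Categorical(['a', 'b', 'c'])
    # >>> c.shift(1)
    # [NaN, a, b]
    # Categories (3, object): [a, b, c]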
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
            # we need to ensure __array__ gets all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype.validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
            # >= 0.15.0, < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
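    # Hedged sketch: every category gets an entry, including those that never
    # occur in the values.
    #
    # >>> pd.Categorical(['a', 'a', 'c'], categories=['a', 'b', 'c']).value_counts()
    # a    2
    # b    0
    # c    1
    # dtype: int64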
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime and period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
def argsort(self, *args, **kwargs):
# TODO(PY2): use correct signature
        # We have to do *args, **kwargs to avoid a py2-only signature
# issue since np.argsort differs from argsort.
"""Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
"""
# Keep the implementation here just for the docstring.
return super(Categorical, self).argsort(*args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, dtype=self.dtype,
fastpath=True)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
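    # Hedged sketch: the fill value must already be one of the categories;
    # anything else raises ValueError.
    #
    # >>> c = pd.Categorical(['a', None, 'b'], categories=['a', 'b'])
    # >>> c.fillna('a')
    # [a, a, b]
    # Categories (2, object): [a, b]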
def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
allow_fill : bool, default None.
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values
              (the default). These values are set to `fill_value`. Any other
              negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
if isna(fill_value):
# For categorical, any NA value is considered a user-facing
# NA value. Our storage NA value is -1.
fill_value = -1
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = self._constructor(codes, dtype=self.dtype, fastpath=True)
return result
take = take_nd
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
        # in a 2-d case be passed (slice(None), ...)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
# replace " < ... < " by " ... " to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
If (one or more) value is not in categories or if an assigned
`Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None), ...)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
# There is a bug in numpy, which does not accept a Series as an
# indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Examples
--------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
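Examples
--------
Illustrative example (output shown is representative):
>>> pd.Categorical(['a', 'a', 'b']).mode()
[a]
Categories (2, object): [a, b]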
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def unique(self):
"""
Return the ``Categorical`` whose ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order, categories
keep their existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc'))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc'))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
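Examples
--------
Illustrative example with hypothetical data:
>>> c = pd.Categorical(['a', 'b', 'c'])
>>> c.equals(pd.Categorical(['a', 'b', 'c']))
True
>>> c.equals(pd.Categorical(['a', 'b', 'b']))
False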
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
def is_dtype_equal(self, other):
"""
Returns True if categoricals are the same dtype:
same categories and same ordered.
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
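Examples
--------
Illustrative example (output shown is representative):
>>> pd.Categorical(['a', 'b']).repeat(2)
[a, a, b, b]
Categories (2, object): [a, b]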
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
def _formatting_values(self):
return self
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
@delegate_names(delegate=Categorical,
accessors=["categories", "ordered"],
typ="property")
@delegate_names(delegate=Categorical,
accessors=["rename_categories", "reorder_categories",
"add_categories", "remove_categories",
"remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"],
typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
Be aware that assigning to `categories` is an inplace operation, while all
methods return new categorical data by default (but can be called with
`inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self.index = data.index
self.name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self._parent.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
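For example (illustrative only; output shown is representative):
>>> _get_codes_for_values(np.array(['a', 'c', 'a']),
...                       pd.Index(['a', 'b', 'c']))
array([0, 2, 0], dtype=int8)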
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, 'values', values)
categories = getattr(categories, 'values', categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
def _recode_for_categories(codes, old_categories, new_categories):
"""
Convert a set of codes to a new set of categories.
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
is_iterator(list_like)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# of ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
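Examples
--------
Illustrative example (output shown is representative):
>>> c = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
>>> c.remove_unused_categories()
[a, b]
Categories (2, object): [a, b]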
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
| 1,088 | 1,123 |
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
# results depending whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if isinstance(other, ABCSeries):
return NotImplemented
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
# Two Categoricals can only be be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
# In other Series, this leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# With cat[0], for example, being ``np.int64(1)`` by the time it gets
# into this function would become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :method:`__contains__`
and :class:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
_codes_doc = """The category codes of this categorical.
Level codes are an array of integers which are the positions of the real
values in the categories array.
There is no setter, use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
`Categoricals` can take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean (default False)
Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
# For comparisons, so that numpy uses our implementation of the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
# Ways of specifying the dtype (prioritized ordered)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
Assigning to `categories` is a inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
number of new categories is unequal the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _ndarray_values(self):
return self.codes
@property
def _constructor(self):
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
# Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
so do not need the (computationally intensive) factorization step, which
is usually done in the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
Whether or not this categorical is treated as a ordered
categorical. If not given, the resulting categorical will be
unordered.
"""
codes = np.asarray(codes) # #21767
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
try:
codes = coerce_indexer_dtype(codes, categories)
except (ValueError, TypeError):
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype.validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
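Examples
--------
Illustrative example (output shown is representative):
>>> pd.Categorical(['a', 'b']).as_ordered()
[a, b]
Categories (2, object): [a < b]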
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
set to NaN). If `rename==True`, the categories will simply be renamed
(fewer or more items than in the old categories will result in values set
to NaN or in unused categories respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
On the other hand this method does not do checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprising changes, for example when using special string
dtypes on python3, which do not consider an S1 string equal to a
single char python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, (default: False)
Whether or not the categorical is treated as a ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
self._codes[self._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
items as the current categories or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as a ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
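Examples
--------
Illustrative example (output shown is representative):
>>> c = pd.Categorical(['a', 'b'])
>>> c.add_categories(['c'])
[a, b]
Categories (3, object): [a, b, c]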
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
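Examples
--------
Illustrative example; values in removed categories become NaN
(output shown is representative):
>>> c = pd.Categorical(['a', 'b', 'c'])
>>> c.remove_categories(['c'])
[a, b, NaN]
Categories (2, object): [a, b]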
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
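Examples
--------
Illustrative example (output shown is representative):
>>> pd.Categorical(['a', 'b', 'c']).shift(1)
[NaN, a, b]
Categories (3, object): [a, b, c]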
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
# we need to ensure __array__ gets all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype.validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
# >= 0.15.0, < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
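Examples
--------
For illustration only (assumes ``import numpy as np``):
>>> pd.Categorical(['a', np.nan, 'b']).dropna()
[a, b]
Categories (2, object): [a, b]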
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
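Examples
--------
A small illustrative example (sample data is hypothetical):
>>> pd.Categorical(['a', 'b', 'a']).value_counts()
a    2
b    1
dtype: int64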
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime and period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
def argsort(self, *args, **kwargs):
# TODO(PY2): use correct signature
# We have to do *args, **kwargs to avoid a py2-only signature
# issue since np.argsort differs from argsort.
"""Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
"""
# Keep the implementation here just for the docstring.
return super(Categorical, self).argsort(*args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, dtype=self.dtype,
fastpath=True)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
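Examples
--------
A sketch with hypothetical data; note that the fill value must already be
one of the categories:
>>> pd.Categorical(['a', np.nan, 'b']).fillna('a')
[a, a, b]
Categories (2, object): [a, b]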
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
allow_fill : bool, default None.
How to handle negative values in `indexer`.
* False: negative values in `indexer` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indexer` indicate missing values
(the default). These values are set to `fill_value`. Any other
negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
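Examples
--------
An illustrative sketch of the fill behaviour (hypothetical data):
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat.take([0, -1], allow_fill=True)
[a, NaN]
Categories (3, object): [a, b, c]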
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
if isna(fill_value):
# For categorical, any NA value is considered a user-facing
# NA value. Our storage NA value is -1.
fill_value = -1
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = self._constructor(codes, dtype=self.dtype, fastpath=True)
return result
take = take_nd
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None), ...)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
# replace " < ... < " with " ... " in the middle to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
If (one or more) value is not in categories or if an assigned
`Categorical` does not have the same categories
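Examples
--------
For illustration; assigned values must already be categories (or NaN):
>>> cat = pd.Categorical(['a', 'b', 'a'])
>>> cat[0] = 'b'
>>> cat
[b, b, a]
Categories (2, object): [a, b]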
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None), ...)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
# There is a bug in numpy, which does not accept a Series as an
# indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Examples
--------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
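Examples
--------
A minimal sketch; the Categorical must be ordered:
>>> pd.Categorical(['a', 'b', 'c'], ordered=True).min()
'a'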
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
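Examples
--------
Illustrative only (hypothetical data):
>>> pd.Categorical(['a', 'a', 'b']).mode()
[a]
Categories (2, object): [a, b]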
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def unique(self):
"""
Return the ``Categorical`` whose ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order, categories
keep the existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc'))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc'))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
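Examples
--------
A small sketch with hypothetical data:
>>> c1 = pd.Categorical(['a', 'b', 'a'])
>>> c1.equals(pd.Categorical(['a', 'b', 'a']))
True
>>> c1.equals(pd.Categorical(['a', 'b', 'b']))
False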
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
def is_dtype_equal(self, other):
"""
Returns True if categoricals are the same dtype,
i.e. the same categories and the same ordered attribute.
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
def _formatting_values(self):
return self
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
@delegate_names(delegate=Categorical,
accessors=["categories", "ordered"],
typ="property")
@delegate_names(delegate=Categorical,
accessors=["rename_categories", "reorder_categories",
"add_categories", "remove_categories",
"remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"],
typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
Be aware that assigning to `categories` is an inplace operation, while all
methods return new categorical data by default (but can be called with
`inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self.index = data.index
self.name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self._parent.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
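For illustration (hypothetical inputs; the dtype of the returned codes
depends on the number of categories):
>>> _get_codes_for_values(np.array(['a', 'c', 'x'], dtype=object),
...                       pd.Index(['a', 'b', 'c']))
array([ 0,  2, -1], dtype=int8)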
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, 'values', values)
categories = getattr(categories, 'values', categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
def _recode_for_categories(codes, old_categories, new_categories):
"""
Convert a set of codes from one set of categories to codes for a new set of
categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
is_iterator(list_like)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
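Examples
--------
A sketch with hypothetical input (reprs shown as on Python 3):
>>> codes, categories = _factorize_from_iterable(['b', 'a', 'b'])
>>> codes
array([1, 0, 1], dtype=int8)
>>> categories
Index(['a', 'b'], dtype='object')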
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# from ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
value_counts
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
# results depending whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if isinstance(other, ABCSeries):
return NotImplemented
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
# Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
# In other Series, this leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# With cat[0], for example, being ``np.int64(1)`` by the time it gets
# into this function would become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :meth:`__contains__`
and :meth:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
_codes_doc = """The category codes of this categorical.
Level codes are an array of integers which are the positions of the real
values in the categories array.
There is no setter; use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion.
`Categoricals` can take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
# For comparisons, so that numpy uses our implementation of the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
# Ways of specifying the dtype (prioritized ordered)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
number of new categories is not equal to the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _ndarray_values(self):
return self.codes
@property
def _constructor(self):
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
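Examples
--------
An illustrative sketch (hypothetical data):
>>> pd.Categorical(['a', 'b']).astype(object)
array(['a', 'b'], dtype=object)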
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
# Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
so do not need the (computationally intensive) factorization step, which is
usually done in the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered
categorical. If not given, the resulting categorical will be
unordered.
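Examples
--------
A minimal sketch (hypothetical codes; -1 marks a missing value):
>>> pd.Categorical.from_codes([0, 1, 0, -1], categories=['a', 'b'])
[a, b, a, NaN]
Categories (2, object): [a, b]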
"""
codes = np.asarray(codes) # #21767
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
try:
codes = coerce_indexer_dtype(codes, categories)
except (ValueError, TypeError):
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype.validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
set to NaN). If `rename==True`, the categories will simply be renamed
(fewer or more items than in the old categories will result in values set to
NaN or in unused categories, respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
On the other hand this method does not do checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprising changes, for example when using special string
dtypes on python3, which does not consider an S1 string equal to a
single-character python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, (default: False)
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
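Examples
--------
An illustrative sketch; values outside the new categories become NaN:
>>> pd.Categorical(['a', 'b', 'c']).set_categories(['a', 'b'])
[a, b, NaN]
Categories (2, object): [a, b]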
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
self._codes[self._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
items as the current categories or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
            Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
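        Examples
        --------
        A small example with illustrative data:
        >>> c = pd.Categorical(['a', 'b', 'a'])
        >>> c.reorder_categories(['b', 'a'], ordered=True)
        [a, b, a]
        Categories (2, object): [b < a]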
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
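        Examples
        --------
        For illustration; the new category is appended but stays unused:
        >>> c = pd.Categorical(['a', 'b'])
        >>> c.add_categories(['c'])
        [a, b]
        Categories (3, object): [a, b, c]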
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
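        Examples
        --------
        Illustrative usage; values in removed categories become NaN:
        >>> c = pd.Categorical(['a', 'b', 'c'])
        >>> c.remove_categories(['b'])
        [a, NaN, c]
        Categories (2, object): [a, c]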
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
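        Examples
        --------
        A small example with illustrative data:
        >>> c = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
        >>> c.remove_unused_categories()
        [a, b]
        Categories (2, object): [a, b]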
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
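        Examples
        --------
        For illustration; vacated positions are filled with NaN:
        >>> pd.Categorical(['a', 'b', 'c']).shift(1)
        [NaN, a, b]
        Categories (3, object): [a, b, c]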
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
# we need to ensure __array__ get's all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype.validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
            # >= 0.15.0, < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
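        Examples
        --------
        A short example with illustrative data:
        >>> pd.Categorical(['a', np.nan]).isna()
        array([False,  True])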
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
        Missing values (-1 in .codes) are detected as null; the returned
        array is their boolean inverse.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
# MASKED: value_counts function (lines 1419-1456)
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime and period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
def argsort(self, *args, **kwargs):
# TODO(PY2): use correct signature
        # We have to do *args, **kwargs to avoid a py2-only signature
# issue since np.argsort differs from argsort.
"""Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
"""
# Keep the implementation here just for the docstring.
return super(Categorical, self).argsort(*args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, dtype=self.dtype,
fastpath=True)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
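        Examples
        --------
        Illustrative usage; the fill value must already be a category:
        >>> c = pd.Categorical(['a', np.nan, 'b'])
        >>> c.fillna('b')
        [a, b, b]
        Categories (2, object): [a, b]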
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
allow_fill : bool, default None.
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values
(the default). These values are set to `fill_value`. Any other
              negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
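        Examples
        --------
        Illustrative usage with ``allow_fill=True``, where ``-1`` marks a
        missing position:
        >>> c = pd.Categorical(['a', 'b', 'c'])
        >>> c.take([0, 2, -1], allow_fill=True)
        [a, c, NaN]
        Categories (3, object): [a, b, c]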
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
if isna(fill_value):
# For categorical, any NA value is considered a user-facing
# NA value. Our storage NA value is -1.
fill_value = -1
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = self._constructor(codes, dtype=self.dtype, fastpath=True)
return result
take = take_nd
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
        # in a 2-d case be passed (slice(None),....)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
        # replace " < ... < " with " ... " to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
            If (one or more) value is not in categories or if an assigned
            `Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
            # in a 2-d case be passed (slice(None),....)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
            # There is a bug in numpy, which does not accept a Series as an
# indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Example
-------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
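        Examples
        --------
        A short example with illustrative, ordered data:
        >>> pd.Categorical(['a', 'b', 'c'], ordered=True).min()
        'a'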
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
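        Examples
        --------
        For illustration:
        >>> pd.Categorical(['a', 'a', 'b']).mode()
        [a]
        Categories (2, object): [a, b]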
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def unique(self):
"""
        Return the ``Categorical`` whose ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order, categories
          keep their existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
        >>> pd.Categorical(list('baabc')).unique()
[b, a, c]
Categories (3, object): [b, a, c]
        >>> pd.Categorical(list('baabc'), categories=list('abc')).unique()
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
        >>> pd.Categorical(list('baabc'),
        ...                categories=list('abc'),
        ...                ordered=True).unique()
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
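        Examples
        --------
        A small example with illustrative data:
        >>> c = pd.Categorical(['a', 'b'])
        >>> c.equals(pd.Categorical(['a', 'b']))
        True
        >>> c.equals(pd.Categorical(['a', 'c']))
        False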
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
def is_dtype_equal(self, other):
"""
Returns True if categoricals are the same dtype
same categories, and same ordered
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
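        Examples
        --------
        For illustration:
        >>> pd.Categorical(['a', 'b']).repeat(2)
        [a, a, b, b]
        Categories (2, object): [a, b]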
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
def _formatting_values(self):
return self
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
@delegate_names(delegate=Categorical,
accessors=["categories", "ordered"],
typ="property")
@delegate_names(delegate=Categorical,
accessors=["rename_categories", "reorder_categories",
"add_categories", "remove_categories",
"remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"],
typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
    Be aware that assigning to `categories` is an inplace operation, while all
    methods return new categorical data by default (but can be called with
    `inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self.index = data.index
self.name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self._parent.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, 'values', values)
categories = getattr(categories, 'values', categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
def _recode_for_categories(codes, old_categories, new_categories):
"""
    Convert a set of codes to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
is_iterator(list_like)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# from ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
|
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
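        Examples
        --------
        A small example with illustrative data; unused categories count as 0:
        >>> c = pd.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c'])
        >>> c.value_counts()
        a    2
        b    1
        c    0
        dtype: int64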
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
| 1,419 | 1,456 |
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
# results depending whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if isinstance(other, ABCSeries):
return NotImplemented
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
            # Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
                # In other Series, comparisons with NaN lead to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# With cat[0], for example, being ``np.int64(1)`` by the time it gets
# into this function would become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
    This is a helper method for :meth:`__contains__`
    and :meth:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
    cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
_codes_doc = """The category codes of this categorical.
    Level codes are an array of integers which are the positions of the real
    values in the categories array.
    There is no setter; use the other categorical methods and the normal item
    setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
    `Categoricals` can take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean, (default False)
        Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
    # For comparisons, so that numpy uses our implementation of the compare
    # ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
        # Ways of specifying the dtype (in priority order)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
        Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
            number of new categories is unequal to the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _ndarray_values(self):
return self.codes
@property
def _constructor(self):
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
            # Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
so do not need the (computationally intensive) factorization step, which
is usually done in the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered
categorical. If not given, the resulting categorical will be
unordered.
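Examples
--------
A minimal illustration (codes and categories chosen arbitrarily);
a code of -1 marks a missing value:
>>> pd.Categorical.from_codes([0, 1, 0, -1], categories=['a', 'b'])
[a, b, a, NaN]
Categories (2, object): [a, b]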
"""
codes = np.asarray(codes) # #21767
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
try:
codes = coerce_indexer_dtype(codes, categories)
except (ValueError, TypeError):
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype.validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non-writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
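Examples
--------
An illustrative sketch (values chosen arbitrarily):
>>> c = pd.Categorical(['a', 'b', 'a'])
>>> c.as_ordered()
[a, b, a]
Categories (2, object): [a < b]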
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
set to NaN). If `rename==True`, the categories will simply be renamed
(fewer or more items than in the old categories will result in values
set to NaN or in unused categories respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
On the other hand this method does not do any checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprising changes, for example when using special string
dtypes on python3, which do not consider an S1 string equal to a
single char python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, (default: False)
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
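Examples
--------
A small sketch (values chosen arbitrarily); one new, unused category is
added and one existing category is dropped in a single call:
>>> c = pd.Categorical(['a', 'b', 'c'])
>>> c.set_categories(['a', 'b', 'd'])
[a, b, NaN]
Categories (3, object): [a, b, d]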
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
cat._codes[cat._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
items as the current categories or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
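Examples
--------
A brief sketch (values chosen arbitrarily); the same categories in a new
order, here combined with ``ordered=True``:
>>> c = pd.Categorical(['a', 'b', 'c'])
>>> c.reorder_categories(['c', 'b', 'a'], ordered=True)
[a, b, c]
Categories (3, object): [c < b < a]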
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
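Examples
--------
A minimal sketch (values chosen arbitrarily); the new category is
appended but initially unused:
>>> c = pd.Categorical(['a', 'b'])
>>> c.add_categories(['c'])
[a, b]
Categories (3, object): [a, b, c]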
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
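Examples
--------
A minimal sketch (values chosen arbitrarily); values in removed
categories become NaN:
>>> c = pd.Categorical(['a', 'b', 'a'])
>>> c.remove_categories(['b'])
[a, NaN, a]
Categories (1, object): [a]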
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
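Examples
--------
A minimal sketch (values chosen arbitrarily):
>>> c = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
>>> c.remove_unused_categories()
[a, b]
Categories (2, object): [a, b]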
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
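Examples
--------
A brief sketch (values chosen arbitrarily); vacated positions become NaN:
>>> pd.Categorical(['a', 'b', 'c']).shift(1)
[NaN, a, b]
Categories (3, object): [a, b, c]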
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
# we need to ensure __array__ gets all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype.validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
# >=15.0 < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
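Examples
--------
A small sketch (values chosen arbitrarily); note the zero count for the
unused category:
>>> c = pd.Categorical(['a', 'a', 'b'], categories=['a', 'b', 'c'])
>>> c.value_counts()
a    2
b    1
c    0
dtype: int64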
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime or period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
def argsort(self, *args, **kwargs):
# TODO(PY2): use correct signature
# We have to do *args, **kwargs to avoid a py2-only signature
# issue since np.argsort differs from argsort.
"""Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
"""
# Keep the implementation here just for the docstring.
return super(Categorical, self).argsort(*args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, dtype=self.dtype,
fastpath=True)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
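Examples
--------
A minimal sketch (values chosen arbitrarily); the fill value must already
be one of the categories:
>>> c = pd.Categorical(['a', None, 'b'], categories=['a', 'b'])
>>> c.fillna('a')
[a, a, b]
Categories (2, object): [a, b]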
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
allow_fill : bool, default None.
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values
(the default). These values are set to `fill_value`. Any other
negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
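Examples
--------
A brief sketch (values chosen arbitrarily); with ``allow_fill=True`` a -1
in the indexer produces NaN:
>>> c = pd.Categorical(['a', 'b', 'c'])
>>> c.take([0, -1, 2], allow_fill=True)
[a, NaN, c]
Categories (3, object): [a, b, c]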
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
if isna(fill_value):
# For categorical, any NA value is considered a user-facing
# NA value. Our storage NA value is -1.
fill_value = -1
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = self._constructor(codes, dtype=self.dtype, fastpath=True)
return result
take = take_nd
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None), ...)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
# replace " < ... < " with " ... " to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
If (one or more) value is not in categories or if an assigned
`Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None), ...)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
# There is a bug in numpy, which does not accept a Series as an
# indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Example
-------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
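Examples
--------
A minimal sketch (values chosen arbitrarily); only ordered categoricals
have a minimum:
>>> pd.Categorical(['b', 'a', 'c'], ordered=True).min()
'a'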
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
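Examples
--------
A brief sketch (values chosen arbitrarily):
>>> pd.Categorical(['a', 'b', 'b']).mode()
[b]
Categories (2, object): [a, b]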
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def unique(self):
"""
Return the ``Categorical`` whose ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order, categories
keep the existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc'))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc'))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
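Examples
--------
A short sketch (values chosen arbitrarily); for unordered categoricals
the category order may differ:
>>> a = pd.Categorical(['a', 'b'])
>>> b = pd.Categorical(['a', 'b'], categories=['b', 'a'])
>>> a.equals(b)
True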
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
def is_dtype_equal(self, other):
"""
Returns True if categoricals are the same dtype:
same categories and same ordered attribute.
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
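Examples
--------
A tiny sketch (values chosen arbitrarily):
>>> pd.Categorical(['a', 'b']).repeat(2)
[a, a, b, b]
Categories (2, object): [a, b]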
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
def _formatting_values(self):
return self
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
@delegate_names(delegate=Categorical,
accessors=["categories", "ordered"],
typ="property")
@delegate_names(delegate=Categorical,
accessors=["rename_categories", "reorder_categories",
"add_categories", "remove_categories",
"remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"],
typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
Be aware that assigning to `categories` is an inplace operation, while all
methods return new categorical data by default (but can be called with
`inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self.index = data.index
self.name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self._parent.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
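Examples
--------
A small sketch (hypothetical inputs); values absent from `categories`
map to -1:
>>> _get_codes_for_values(np.array(['a', 'b', 'd']),
...                       pd.Index(['a', 'b', 'c']))
array([ 0,  1, -1], dtype=int8)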
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, 'values', values)
categories = getattr(categories, 'values', categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
def _recode_for_categories(codes, old_categories, new_categories):
"""
Convert a set of codes from one set of categories to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
is_iterator(list_like)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
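Examples
--------
A brief sketch (hypothetical input):
>>> codes, categories = _factorize_from_iterable(['b', 'a', 'b'])
>>> codes
array([1, 0, 1], dtype=int8)
>>> categories
Index(['a', 'b'], dtype='object')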
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# from ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
|
get_values
|
Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
|
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
# results depending whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if isinstance(other, ABCSeries):
return NotImplemented
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
# Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
# In other Series, this leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# With cat[0], for example, being ``np.int64(1)`` by the time it gets
# into this function would become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :method:`__contains__`
and :class:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
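Examples
--------
An illustrative sketch (hypothetical inputs); 'b' is a category but its
code never occurs in the values:
>>> c = pd.Categorical(['a'], categories=['a', 'b'])
>>> contains(c, 'a', container=c._codes)
True
>>> contains(c, 'b', container=c._codes)
False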
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
_codes_doc = """The category codes of this categorical.
Level codes are an array of integers which are the positions of the real
values in the categories array.
There is no setter; use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion.
`Categoricals` can only take on a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
# For comparisons, so that numpy uses our implementation of the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
# Ways of specifying the dtype (in order of priority)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
number of new categories is unequal to the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _ndarray_values(self):
return self.codes
@property
def _constructor(self):
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
# Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
so do not need the (computationally intensive) factorization step, which
is usually done in the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered
categorical. If not given, the resulting categorical will be
unordered.
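Examples
--------
A small usage sketch; the output shown is what this version is expected
to produce:
>>> pd.Categorical.from_codes([0, 1, -1, 0], categories=['a', 'b'])
[a, b, NaN, a]
Categories (2, object): [a, b]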
"""
codes = np.asarray(codes) # #21767
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
try:
codes = coerce_indexer_dtype(codes, categories)
except (ValueError, TypeError):
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype.validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non-writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
set to NaN). If `rename==True`, the categories will simply be renamed
(fewer or more items than in the old categories will result in values set to
NaN or in unused categories respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
On the other hand this method does not do checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprising changes, for example when using special string
dtypes on python3, which do not consider an S1 string equal to a
single-character python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, (default: False)
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
cat._codes[cat._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
items as the current categories or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
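Examples
--------
For illustration (expected output shown):
>>> c = pd.Categorical(['a', 'b', 'a'])
>>> c.reorder_categories(['b', 'a'], ordered=True)
[a, b, a]
Categories (2, object): [b < a]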
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
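Examples
--------
A small usage sketch (expected output shown):
>>> c = pd.Categorical(['a', 'b'])
>>> c.add_categories('c')
[a, b]
Categories (3, object): [a, b, c]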
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
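Examples
--------
For illustration (expected output shown); values in removed categories
become NaN:
>>> c = pd.Categorical(['a', 'b', 'a'])
>>> c.remove_categories('b')
[a, NaN, a]
Categories (1, object): [a]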
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
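Examples
--------
An illustrative sketch (expected output shown):
>>> c = pd.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c'])
>>> c.remove_unused_categories()
[a, b, a]
Categories (2, object): [a, b]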
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
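Examples
--------
A small usage sketch (expected output shown); shifted-in positions become
NaN:
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat.shift(1)
[NaN, a, b]
Categories (3, object): [a, b, c]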
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
# we need to ensure __array__ gets all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype.validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
# >= 0.15.0, < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
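Examples
--------
For illustration (expected output shown); unused categories get a zero
count:
>>> c = pd.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c'])
>>> c.value_counts()
a    2
b    1
c    0
dtype: int64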
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
# MASKED: get_values function (lines 1458-1472)
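# A minimal sketch of the masked get_values method, assuming the usual
# behavior for this version: materialize the values as a numpy array of
# the categories' dtype, but return an Index for datetime-like categories
# so that their metadata is preserved.
def get_values(self):
    """ Return the values.
    For internal compatibility with pandas formatting.
    Returns
    -------
    values : numpy array
        A numpy array of the same dtype as categorical.categories.dtype or
        Index if datetime / periods
    """
    # if we are a datetime-like index, return an Index to keep metadata
    if is_datetimelike(self.categories):
        return self.categories.take(self._codes, fill_value=np.nan)
    return np.array(self)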
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
def argsort(self, *args, **kwargs):
# TODO(PY2): use correct signature
# We have to do *args, **kwargs to avoid a py2-only signature
# issue since np.argsort differs from argsort.
"""Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
"""
# Keep the implementation here just for the docstring.
return super(Categorical, self).argsort(*args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, dtype=self.dtype,
fastpath=True)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
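Examples
--------
A small usage sketch (expected output shown):
>>> c = pd.Categorical(['a', np.nan, 'b'])
>>> c.fillna('a')
[a, a, b]
Categories (2, object): [a, b]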
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
allow_fill : bool, default None.
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values
(the default). These values are set to `fill_value`. Any other
negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
if isna(fill_value):
# For categorical, any NA value is considered a user-facing
# NA value. Our storage NA value is -1.
fill_value = -1
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = self._constructor(codes, dtype=self.dtype, fastpath=True)
return result
take = take_nd
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None),....)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
# replace " < ... < " with " ... " to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
If (one or more) value is not in categories or if an assigned
`Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None),....)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
# There is a bug in numpy, which does not accept a Series as an
# indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Example
-------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def unique(self):
"""
Return the ``Categorical`` whose ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order, categories
keep their existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc')).unique()
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc')).unique()
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
... categories=list('abc'),
...                ordered=True).unique()
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
def is_dtype_equal(self, other):
"""
Returns True if categoricals are the same dtype:
same categories and same ordered
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
def _formatting_values(self):
return self
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
@delegate_names(delegate=Categorical,
accessors=["categories", "ordered"],
typ="property")
@delegate_names(delegate=Categorical,
accessors=["rename_categories", "reorder_categories",
"add_categories", "remove_categories",
"remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"],
typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
Be aware that assigning to `categories` is an inplace operation, while all
methods return new categorical data by default (but can be called with
`inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self.index = data.index
self.name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self._parent.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, 'values', values)
categories = getattr(categories, 'values', categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
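# Illustrative sketch (not executed here): with values ['b', 'a', 'd'] and
# categories Index(['a', 'b', 'c']), the returned codes are expected to be
# array([1, 0, -1], dtype=int8), i.e. the position of each value in
# `categories`, with -1 marking values that are not found.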
def _recode_for_categories(codes, old_categories, new_categories):
"""
Convert a set of codes from one set of categories to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
is_iterator(list_like)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# from ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
# results depending whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if isinstance(other, ABCSeries):
return NotImplemented
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
# Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
# In other series, this leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# With cat[0], for example, being ``np.int64(1)`` by the time it gets
# into this function would become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :method:`__contains__`
and :class:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
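Examples
--------
A minimal illustration ('b' is a category but does not occur in the codes):
>>> c = pd.Categorical(['a'], categories=['a', 'b'])
>>> contains(c, 'a', c._codes)
True
>>> contains(c, 'b', c._codes)
False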
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
_codes_doc = """The category codes of this categorical.
Level codes are an array of integers which are the positions of the real
values in the categories array.
There is no setter; use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
`Categoricals` can take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
# For comparisons, so that numpy uses our implementation of the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
# Ways of specifying the dtype (prioritized ordered)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
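# Illustrative sketch (not executed here): Categorical(['a', 'b'],
# dtype=CategoricalDtype(['b', 'a', 'c'])) takes its categories from the
# dtype (case 1a), while Categorical(['a', 'b'], dtype='category') infers
# them from the values (case 2c).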
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
number of new categories is unequal to the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _ndarray_values(self):
return self.codes
@property
def _constructor(self):
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
# Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
so do not need the (computationally intensive) factorization step, which is
usually done in the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered
categorical. If not given, the resulting categorical will be
unordered.
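Examples
--------
A minimal illustration (a code of -1 becomes NaN):
>>> pd.Categorical.from_codes([0, 1, -1, 1], categories=['a', 'b'])
[a, b, NaN, b]
Categories (2, object): [a, b]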
"""
codes = np.asarray(codes) # #21767
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
try:
codes = coerce_indexer_dtype(codes, categories)
except (ValueError, TypeError):
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype.validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
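Examples
--------
A minimal illustration:
>>> c = pd.Categorical(['a', 'b'])
>>> c.set_ordered(True)
[a, b]
Categories (2, object): [a < b]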
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
set to NaN). If `rename==True`, the categories will simply be renamed
(fewer or more items than in the old categories will result in values set
to NaN or in unused categories respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
On the other hand this method does not do checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprising changes, for example when using special string
dtypes on python3, which do not consider an S1 string equal to a
single char python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
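Examples
--------
A minimal illustration (reorder and add an unused category in one step):
>>> c = pd.Categorical(['a', 'b', 'a'])
>>> c.set_categories(['b', 'a', 'c'])
[a, b, a]
Categories (3, object): [b, a, c]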
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
self._codes[self._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
items as the current categories or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list-like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
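Examples
--------
A minimal illustration (same categories, new order):
>>> c = pd.Categorical(['a', 'b', 'a'])
>>> c.reorder_categories(['b', 'a'], ordered=True)
[a, b, a]
Categories (2, object): [b < a]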
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
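Examples
--------
A minimal illustration (the new category is appended but unused):
>>> c = pd.Categorical(['a', 'b'])
>>> c.add_categories(['c'])
[a, b]
Categories (3, object): [a, b, c]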
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
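Examples
--------
A minimal illustration (values in removed categories become NaN):
>>> c = pd.Categorical(['a', 'b', 'a'])
>>> c.remove_categories(['b'])
[a, NaN, a]
Categories (1, object): [a]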
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
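Examples
--------
A minimal illustration (the unused category 'c' is dropped):
>>> c = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
>>> c.remove_unused_categories()
[a, b]
Categories (2, object): [a, b]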
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
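Examples
--------
A minimal illustration (vacated positions become NaN):
>>> c = pd.Categorical(['a', 'b', 'c'])
>>> c.shift(1)
[NaN, a, b]
Categories (3, object): [a, b, c]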
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
# we need to ensure __array__ gets all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype.validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
# >=15.0 < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime or period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
def argsort(self, *args, **kwargs):
# TODO(PY2): use correct signature
# We have to do *args, **kwargs to avoid a py2-only signature
# issue since np.argsort differs from argsort.
"""Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
"""
# Keep the implementation here just for the docstring.
return super(Categorical, self).argsort(*args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, dtype=self.dtype,
fastpath=True)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
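Examples
--------
A minimal illustration (the fill value must already be a category):
>>> c = pd.Categorical(['a', None, 'b'], categories=['a', 'b'])
>>> c.fillna('a')
[a, a, b]
Categories (2, object): [a, b]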
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
allow_fill : bool, default None.
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values
(the default). These values are set to `fill_value`. Any other
negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
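Examples
--------
A minimal illustration (with ``allow_fill=True``, -1 marks a missing value):
>>> c = pd.Categorical(['a', 'b', 'c'])
>>> c.take([0, -1], allow_fill=True)
[a, NaN]
Categories (3, object): [a, b, c]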
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
if isna(fill_value):
# For categorical, any NA value is considered a user-facing
# NA value. Our storage NA value is -1.
fill_value = -1
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = self._constructor(codes, dtype=self.dtype, fastpath=True)
return result
take = take_nd
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None),....)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
# replace " < ... < " with " ... " to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
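# Usage sketch for ``__getitem__`` (illustrative; the reprs below are what
# a standard pandas build would be expected to show): scalar keys return a
# category value (or np.nan for missing), array-like keys return a new
# Categorical.
#
# >>> c = pd.Categorical(['a', 'b', 'c'])
# >>> c[0]
# 'a'
# >>> c[1:]
# [b, c]
# Categories (3, object): [a, b, c]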
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
If (one or more) value is not in categories or if an assigned
`Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None),....)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
# There is a bug in numpy, which does not accept a Series as an
# indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Examples
--------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
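# Usage sketch for ``min``/``max`` (illustrative; expected output from a
# typical pandas session): both require an ordered Categorical and return a
# category value rather than a code; on an unordered Categorical they raise
# TypeError, as documented above.
#
# >>> c = pd.Categorical(['a', 'b', 'c'], ordered=True)
# >>> c.min()
# 'a'
# >>> c.max()
# 'c'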
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
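# Usage sketch for ``mode`` (illustrative; output shown as expected from a
# standard pandas build): the result is always a Categorical, even when a
# single value is the mode, and missing values are dropped by default.
#
# >>> c = pd.Categorical(['a', 'a', 'b', np.nan])
# >>> c.mode()
# [a]
# Categories (2, object): [a, b]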
def unique(self):
"""
Return the ``Categorical`` whose ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order, categories
keep the existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc'))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc'))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
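# Usage sketch for ``equals`` (illustrative; expected doctest-style output):
# two arrays compare equal only when their dtypes match and the values line
# up element-wise.
#
# >>> c1 = pd.Categorical(['a', 'b', 'a'])
# >>> c1.equals(pd.Categorical(['a', 'b', 'a']))
# True
# >>> c1.equals(pd.Categorical(['a', 'c', 'a']))
# False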
def is_dtype_equal(self, other):
"""
Returns True if categoricals are the same dtype:
same categories and same ordered attribute.
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
def _formatting_values(self):
return self
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
@delegate_names(delegate=Categorical,
accessors=["categories", "ordered"],
typ="property")
@delegate_names(delegate=Categorical,
accessors=["rename_categories", "reorder_categories",
"add_categories", "remove_categories",
"remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"],
typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
Be aware that assigning to `categories` is an inplace operation, while all
methods return new categorical data by default (but can be called with
`inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self.index = data.index
self.name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self._parent.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
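# Usage sketch for the ``.cat`` accessor (illustrative; reprs are what a
# typical pandas session would be expected to show): the delegated
# properties and methods above operate on a Series of category dtype and
# return Series-aligned results.
#
# >>> s = pd.Series(['a', 'b', 'a'], dtype='category')
# >>> s.cat.codes
# 0    0
# 1    1
# 2    0
# dtype: int8
# >>> s.cat.add_categories(['c']).cat.categories
# Index(['a', 'b', 'c'], dtype='object')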
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, 'values', values)
categories = getattr(categories, 'values', categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
def _recode_for_categories(codes, old_categories, new_categories):
"""
Convert a set of codes to codes for a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
is_iterator(list_like)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# from ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
|
sort_values
|
Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
|
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
# results depending whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if isinstance(other, ABCSeries):
return NotImplemented
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
# Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
# In other Series, this leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# cat[0], for example, which is ``np.int64(1)`` by the time it gets
# into this function, would become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :method:`__contains__`
and :class:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
_codes_doc = """The category codes of this categorical.
Level codes are an array of integers which are the positions of the real
values in the categories array.
There is no setter; use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
`Categoricals` can take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
# For comparisons, so that numpy uses our implementation of the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
# Ways of specifying the dtype (in order of priority)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
number of new categories is unequal to the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _ndarray_values(self):
return self.codes
@property
def _constructor(self):
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
# Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
so do not need the (computation intensive) factorization step, which is
usually done in the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered
categorical. If not given, the resulting categorical will be
unordered.
"""
codes = np.asarray(codes) # #21767
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
try:
codes = coerce_indexer_dtype(codes, categories)
except (ValueError, TypeError):
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype.validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
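# Usage sketch for ``from_codes`` (illustrative; expected doctest-style
# output): codes index directly into the given categories and -1 marks a
# missing value.
#
# >>> pd.Categorical.from_codes([0, 1, 0, -1], categories=['a', 'b'])
# [a, b, a, NaN]
# Categories (2, object): [a, b]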
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
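# Usage sketch for ``as_ordered``/``as_unordered`` (illustrative; reprs as
# expected from a standard pandas build): only the ``ordered`` flag of the
# dtype changes, codes and categories are untouched.
#
# >>> c = pd.Categorical(['a', 'b'])
# >>> c.as_ordered()
# [a, b]
# Categories (2, object): [a < b]
# >>> c.as_ordered().as_unordered()
# [a, b]
# Categories (2, object): [a, b]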
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
set to NaN). If `rename==True`, the categories will simply be renamed
(fewer or more items than in old categories will result in values set to
NaN or in unused categories respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
On the other hand this method does not do checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprising changes, for example when using special string
dtypes on python3, which do not consider an S1 string equal to a
single-char python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, (default: False)
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
self._codes[self._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
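# Usage sketch for ``set_categories`` (illustrative; output shown as
# expected from a typical pandas session): new categories may add unused
# ones or drop existing ones, in which case the affected values become NaN.
#
# >>> c = pd.Categorical(['a', 'b', 'a'])
# >>> c.set_categories(['b', 'a', 'c'])
# [a, b, a]
# Categories (3, object): [b, a, c]
# >>> c.set_categories(['b'])
# [NaN, b, NaN]
# Categories (1, object): [b]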
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
items as the current categories or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
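# Usage sketch for ``reorder_categories`` (illustrative; expected
# doctest-style output): the same categories are kept but in the requested
# order, optionally switching to an ordered dtype.
#
# >>> c = pd.Categorical(['a', 'b', 'a'])
# >>> c.reorder_categories(['b', 'a'], ordered=True)
# [a, b, a]
# Categories (2, object): [b < a]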
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
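# Usage sketch for ``add_categories`` (illustrative; output as expected
# from a standard pandas build): the new categories are appended and remain
# unused until values are assigned to them.
#
# >>> c = pd.Categorical(['a', 'b'])
# >>> c.add_categories(['c'])
# [a, b]
# Categories (3, object): [a, b, c]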
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
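# Usage sketch for ``remove_categories`` (illustrative; expected
# doctest-style output): values that were in the removed categories become
# NaN.
#
# >>> c = pd.Categorical(['a', 'b', 'a'])
# >>> c.remove_categories(['b'])
# [a, NaN, a]
# Categories (1, object): [a]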
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
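# Usage sketch for ``remove_unused_categories`` (illustrative; output as a
# typical pandas session would be expected to show): categories that no
# value points to are dropped from the dtype.
#
# >>> c = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
# >>> c.remove_unused_categories()
# [a, b]
# Categories (2, object): [a, b]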
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
If a `dict` or :class:`~pandas.Series` is used, any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
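# Usage sketch for ``shift`` (illustrative; expected doctest-style output):
# values are moved by ``periods`` positions and the vacated slots are
# filled with NaN.
#
# >>> c = pd.Categorical(['a', 'b', 'c'])
# >>> c.shift(1)
# [NaN, a, b]
# Categories (3, object): [a, b, c]
# >>> c.shift(-1)
# [b, c, NaN]
# Categories (3, object): [a, b, c]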
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
# we need to ensure __array__ gets all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype.validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
# >= 0.15.0, < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
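# Usage sketch for ``dropna`` (illustrative; output as expected from a
# standard pandas build): entries whose code is -1 are removed while the
# dtype (categories and orderedness) is preserved.
#
# >>> c = pd.Categorical(['a', np.nan, 'b'])
# >>> c.dropna()
# [a, b]
# Categories (2, object): [a, b]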
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime and period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
def argsort(self, *args, **kwargs):
# TODO(PY2): use correct signature
# We have to do *args, **kwargs to avoid a py2-only signature
# issue since np.argsort differs from argsort.
"""Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
"""
# Keep the implementation here just for the docstring.
return super(Categorical, self).argsort(*args, **kwargs)
# MASKED: sort_values function (lines 1530-1634)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
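        Examples
        --------
        A minimal sketch (assuming ``import pandas as pd`` and
        ``import numpy as np``); the fill value must already be a category:
        >>> cat = pd.Categorical(['a', 'b', np.nan], categories=['a', 'b'])
        >>> cat.fillna('a')
        [a, b, a]
        Categories (2, object): [a, b]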
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
allow_fill : bool, default None.
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values
(the default). These values are set to `fill_value`. Any other
              negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
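        Examples
        --------
        Illustrative sketch (assuming ``import pandas as pd``); with
        ``allow_fill=True`` a ``-1`` in the indexer marks a missing value,
        while with ``allow_fill=False`` it indexes from the right:
        >>> cat = pd.Categorical(['a', 'b', 'c'])
        >>> cat.take([0, -1, 1], allow_fill=True)
        [a, NaN, b]
        Categories (3, object): [a, b, c]
        >>> cat.take([0, -1, 1], allow_fill=False)
        [a, c, b]
        Categories (3, object): [a, b, c]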
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
if isna(fill_value):
# For categorical, any NA value is considered a user-facing
# NA value. Our storage NA value is -1.
fill_value = -1
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = self._constructor(codes, dtype=self.dtype, fastpath=True)
return result
take = take_nd
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
        # in a 2-d case be passed (slice(None), ...)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
        # replace " < ... < " with " ... " to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
            If (one or more) value is not in categories or if an assigned
`Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
            # in a 2-d case be passed (slice(None), ...)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
            # There is a bug in numpy, which does not accept a Series as an
# indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
        Examples
        --------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
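        Examples
        --------
        A brief sketch (assuming ``import pandas as pd``); ``min`` requires
        an ordered categorical:
        >>> cat = pd.Categorical(['b', 'a', 'c'], ordered=True)
        >>> cat.min()
        'a'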
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
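        Examples
        --------
        Illustrative sketch (assuming ``import pandas as pd``):
        >>> pd.Categorical(['a', 'b', 'b', 'c']).mode()
        [b]
        Categories (3, object): [a, b, c]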
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def unique(self):
"""
        Return the ``Categorical`` whose ``categories`` and ``codes`` are
        unique. Unused categories are NOT returned.
        - unordered category: values and categories are sorted by appearance
          order.
        - ordered category: values are sorted by appearance order, categories
          keep their existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc'))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc'))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
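        Examples
        --------
        A small sketch (assuming ``import pandas as pd``); for unordered
        categoricals the category order does not matter:
        >>> a = pd.Categorical(['a', 'b'], categories=['a', 'b'])
        >>> b = pd.Categorical(['a', 'b'], categories=['b', 'a'])
        >>> a.equals(b)
        True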
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
def is_dtype_equal(self, other):
"""
        Returns True if the categoricals have the same dtype: the same
        categories and the same ordered attribute
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
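        Examples
        --------
        Illustrative sketch (assuming ``import pandas as pd``); the full
        frame repr is abbreviated here:
        >>> desc = pd.Categorical(['a', 'a', 'b']).describe()
        >>> list(desc.columns)
        ['counts', 'freqs']
        >>> desc['counts'].tolist()
        [2, 1]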
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
def _formatting_values(self):
return self
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
@delegate_names(delegate=Categorical,
accessors=["categories", "ordered"],
typ="property")
@delegate_names(delegate=Categorical,
accessors=["rename_categories", "reorder_categories",
"add_categories", "remove_categories",
"remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"],
typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
    Be aware that assigning to `categories` is an inplace operation, while all
    methods return new categorical data by default (but can be called with
`inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self.index = data.index
self.name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self._parent.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
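    Examples
    --------
    A rough sketch of the expected behaviour (internal helper; assumes
    ``import numpy as np`` and ``import pandas as pd``):
    >>> _get_codes_for_values(np.array(['a', 'b', 'a']), pd.Index(['a', 'b']))
    array([0, 1, 0], dtype=int8)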
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, 'values', values)
categories = getattr(categories, 'values', categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
def _recode_for_categories(codes, old_categories, new_categories):
"""
    Convert a set of codes to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
is_iterator(list_like)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
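    Examples
    --------
    A rough sketch of the expected behaviour (internal helper):
    >>> codes, categories = _factorize_from_iterable(['b', 'a', 'b'])
    >>> codes
    array([1, 0, 1], dtype=int8)
    >>> categories
    Index(['a', 'b'], dtype='object')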
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# from ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
# --- implementation of the masked sort_values function (lines 1530-1634) ---
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, dtype=self.dtype,
fastpath=True)
# --- start_line: 1530, end_line: 1634; the full original file content follows ---
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
# results depending whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if isinstance(other, ABCSeries):
return NotImplemented
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
# Two Categoricals can only be be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
                # In other Series, this leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# With cat[0], for example, being ``np.int64(1)`` by the time it gets
# into this function would become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
    This is a helper method for :meth:`__contains__`
    and :meth:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
    cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
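    Examples
    --------
    A rough sketch of the expected behaviour (internal helper; ``'b'`` is a
    category but does not appear in the codes):
    >>> cat = Categorical(['a'], categories=['a', 'b'])
    >>> contains(cat, 'a', container=cat._codes)
    True
    >>> contains(cat, 'b', container=cat._codes)
    False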
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
_codes_doc = """The category codes of this categorical.
Level codes are an array of integers which are the positions of the real
values in the categories array.
There is no setter; use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
    Represents a categorical variable in classic R / S-plus fashion.
    `Categoricals` can take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean, (default False)
        Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
    # For comparisons, so that numpy uses our implementation of the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
# Ways of specifying the dtype (prioritized ordered)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
        Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
            number of new categories is unequal to the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _ndarray_values(self):
return self.codes
@property
def _constructor(self):
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
            # Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
        so do not need the (computationally intensive) factorization step,
        which is usually done in the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
            Whether or not this categorical is treated as an ordered
categorical. If not given, the resulting categorical will be
unordered.
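        Examples
        --------
        A minimal sketch (assuming ``import pandas as pd``); ``-1`` codes
        become missing values:
        >>> pd.Categorical.from_codes([0, 1, 0, -1], categories=['a', 'b'])
        [a, b, a, NaN]
        Categories (2, object): [a, b]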
"""
codes = np.asarray(codes) # #21767
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
try:
codes = coerce_indexer_dtype(codes, categories)
except (ValueError, TypeError):
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype.validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
            A non-writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
        set to NaN). If `rename==True`, the categories will simply be renamed
(less or more items than in old categories will result in values set to
NaN or in unused categories respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
        On the other hand this method does not do checks (e.g., whether the
        old categories are included in the new categories on a reorder), which
        can result in surprising changes, for example when using special string
        dtypes on python3, which does not consider an S1 string equal to a
        single-character python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, (default: False)
Whether or not the categorical is treated as a ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
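        Examples
        --------
        Illustrative sketch (assuming ``import pandas as pd``); with the
        default ``rename=False`` the values are re-coded, so values dropped
        from the categories become NaN:
        >>> c = pd.Categorical(['a', 'b', 'a'])
        >>> c.set_categories(['b', 'c'])
        [NaN, b, NaN]
        Categories (2, object): [b, c]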
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
self._codes[self._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
            If new categories are list-like and do not have the same number of
            items as the current categories or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as a ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
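        Examples
        --------
        A brief sketch (assuming ``import pandas as pd``); the new category
        is appended and initially unused:
        >>> c = pd.Categorical(['a', 'b'])
        >>> c.add_categories(['c'])
        [a, b]
        Categories (3, object): [a, b, c]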
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
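        Examples
        --------
        Illustrative only (arbitrary sample data); removed values become NaN:
        >>> c = pd.Categorical(['a', 'b', 'c'])
        >>> c.remove_categories(['c'])
        [a, b, NaN]
        Categories (2, object): [a, b]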
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
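        Examples
        --------
        A small sketch with arbitrary sample data:
        >>> c = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
        >>> c.remove_unused_categories()
        [a, b]
        Categories (2, object): [a, b]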
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
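        Examples
        --------
        An illustrative example; the sample values are arbitrary:
        >>> c = pd.Categorical(['a', 'b', 'c'])
        >>> c.shift(1)
        [NaN, a, b]
        Categories (3, object): [a, b, c]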
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
            # we need to ensure __array__ gets all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype.validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
            # >= 0.15.0, < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
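        Examples
        --------
        A short sketch with made-up values:
        >>> c = pd.Categorical(['a', np.nan, 'b'])
        >>> c.dropna()
        [a, b]
        Categories (2, object): [a, b]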
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
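        Examples
        --------
        Illustrative only; the sample values below are arbitrary:
        >>> c = pd.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c'])
        >>> c.value_counts()
        a    2
        b    1
        c    0
        dtype: int64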
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime and period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
def argsort(self, *args, **kwargs):
# TODO(PY2): use correct signature
        # We have to do *args, **kwargs to avoid a py2-only signature
# issue since np.argsort differs from argsort.
"""Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
"""
# Keep the implementation here just for the docstring.
return super(Categorical, self).argsort(*args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, dtype=self.dtype,
fastpath=True)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
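        Examples
        --------
        A minimal sketch (arbitrary sample values); the fill value must
        already be a category:
        >>> c = pd.Categorical(['a', np.nan, 'b'], categories=['a', 'b'])
        >>> c.fillna('a')
        [a, a, b]
        Categories (2, object): [a, b]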
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
allow_fill : bool, default None.
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values
              (the default). These values are set to `fill_value`. Any
              other negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
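        Examples
        --------
        An illustrative example with arbitrary sample data:
        >>> c = pd.Categorical(['a', 'b', 'c'])
        >>> c.take([0, -1], allow_fill=True)
        [a, NaN]
        Categories (3, object): [a, b, c]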
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
if isna(fill_value):
# For categorical, any NA value is considered a user-facing
# NA value. Our storage NA value is -1.
fill_value = -1
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = self._constructor(codes, dtype=self.dtype, fastpath=True)
return result
take = take_nd
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
        # in a 2-d case be passed (slice(None),....)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
        # replace to simply save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
            If (one or more) value is not in categories or if an assigned
`Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
            # in a 2-d case be passed (slice(None),....)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
            # There is a bug in numpy, which does not accept a Series as an
            # indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Example
-------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
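        Examples
        --------
        A brief sketch; the sample values are arbitrary:
        >>> pd.Categorical(['a', 'b'], ordered=True).min()
        'a'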
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
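        Examples
        --------
        Illustrative only, with arbitrary sample values:
        >>> pd.Categorical(['a', 'a', 'b']).mode()
        [a]
        Categories (2, object): [a, b]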
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def unique(self):
"""
        Return the ``Categorical`` whose ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
        - ordered category: values are sorted by appearance order; categories
          keep their existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc'))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc'))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
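        Examples
        --------
        A minimal illustration using made-up values:
        >>> c1 = pd.Categorical(['a', 'b'])
        >>> c2 = pd.Categorical(['a', 'b'])
        >>> c1.equals(c2)
        True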
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
def is_dtype_equal(self, other):
"""
        Returns True if categoricals have the same dtype:
        same categories and same ordered
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
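        Examples
        --------
        A short sketch using arbitrary sample values:
        >>> pd.Categorical(['a', 'b']).repeat(2)
        [a, a, b, b]
        Categories (2, object): [a, b]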
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
def _formatting_values(self):
return self
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
@delegate_names(delegate=Categorical,
accessors=["categories", "ordered"],
typ="property")
@delegate_names(delegate=Categorical,
accessors=["rename_categories", "reorder_categories",
"add_categories", "remove_categories",
"remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"],
typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
    Be aware that assigning to `categories` is an inplace operation, while all
methods return new categorical data per default (but can be called with
`inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self.index = data.index
self.name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self._parent.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, 'values', values)
categories = getattr(categories, 'values', categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
def _recode_for_categories(codes, old_categories, new_categories):
"""
    Convert a set of codes from one set of categories to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
is_iterator(list_like)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# from ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
|
_values_for_rank
|
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
|
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
# results depending whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if isinstance(other, ABCSeries):
return NotImplemented
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
            # Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
                # In other Series, this leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# With cat[0], for example, being ``np.int64(1)`` by the time it gets
# into this function would become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :method:`__contains__`
and :class:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
    cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
_codes_doc = """The category codes of this categorical.
Level codes are an array of integers which are the positions of the real
values in the categories array.
There is no setter; use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
    `Categoricals` can take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean, (default False)
        Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
    # For comparisons, so that numpy uses our implementation of the compare
    # ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
# Ways of specifying the dtype (prioritized ordered)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
        Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
            number of new categories is unequal to the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _ndarray_values(self):
return self.codes
@property
def _constructor(self):
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
            # Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
        so do not need the (computationally intensive) factorization step,
        which is usually done in the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
            Whether or not this categorical is treated as an ordered
categorical. If not given, the resulting categorical will be
unordered.
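        Examples
        --------
        An illustrative example; the codes and categories below are arbitrary:
        >>> pd.Categorical.from_codes([0, 1, 0, -1], categories=['a', 'b'])
        [a, b, a, NaN]
        Categories (2, object): [a, b]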
"""
codes = np.asarray(codes) # #21767
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
try:
codes = coerce_indexer_dtype(codes, categories)
except (ValueError, TypeError):
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype.validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
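    # Illustrative use of ``from_codes`` (a minimal sketch; the exact repr may
    # vary slightly between pandas versions):
    #
    # >>> pd.Categorical.from_codes([0, 1, -1, 1], categories=['a', 'b'])
    # [a, b, NaN, b]
    # Categories (2, object): [a, b]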
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
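    # Illustrative use of ``as_ordered``/``as_unordered`` (a minimal sketch;
    # the exact repr may vary between pandas versions):
    #
    # >>> c = pd.Categorical(['a', 'b'])
    # >>> c.as_ordered()
    # [a, b]
    # Categories (2, object): [a < b]
    # >>> c.as_ordered().as_unordered()
    # [a, b]
    # Categories (2, object): [a, b]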
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
        set to NaN). If `rename==True`, the categories will simply be renamed
        (fewer or more items than in the old categories will result in values
        set to NaN or in unused categories, respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
        On the other hand, this method does not do checks (e.g., whether the
        old categories are included in the new categories on a reorder), which
        can result in surprising changes, for example when using special string
        dtypes on python3, which does not consider an S1 string equal to a
        single-character python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
        ordered : boolean, optional
           Whether or not the categorical is treated as an ordered categorical.
           If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
                cat._codes[cat._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
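    # Illustrative use of ``set_categories`` (a minimal sketch; repr may vary):
    #
    # >>> c = pd.Categorical(['a', 'b', 'c'])
    # >>> c.set_categories(['a', 'b', 'd'])      # 'c' dropped -> NaN, 'd' unused
    # [a, b, NaN]
    # Categories (3, object): [a, b, d]
    # >>> c.set_categories(['x', 'y', 'z'], rename=True)  # positional rename
    # [x, y, z]
    # Categories (3, object): [x, y, z]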
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
            If new categories are list-like and do not have the same number of
            items as the current categories or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
        `new_categories` needs to include all old categories and no new category
        items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
           Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
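    # Illustrative use of ``reorder_categories`` (a minimal sketch; repr may
    # vary between pandas versions):
    #
    # >>> c = pd.Categorical(['a', 'b', 'a'])
    # >>> c.reorder_categories(['b', 'a'], ordered=True)
    # [a, b, a]
    # Categories (2, object): [b < a]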
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
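    # Illustrative use of ``add_categories`` (a minimal sketch; repr may vary):
    #
    # >>> c = pd.Categorical(['a', 'b'])
    # >>> c.add_categories(['c'])
    # [a, b]
    # Categories (3, object): [a, b, c]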
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
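    # Illustrative use of ``remove_categories`` (a minimal sketch; values in a
    # removed category become NaN):
    #
    # >>> c = pd.Categorical(['a', 'b', 'a'])
    # >>> c.remove_categories(['a'])
    # [NaN, b, NaN]
    # Categories (1, object): [b]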
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
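    # Illustrative use of ``remove_unused_categories`` (a minimal sketch; repr
    # may vary):
    #
    # >>> c = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
    # >>> c.remove_unused_categories()
    # [a, b]
    # Categories (2, object): [a, b]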
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
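    # Illustrative use of ``shift`` (a minimal sketch): the vacated positions
    # are filled with missing values.
    #
    # >>> c = pd.Categorical(['a', 'b', 'c'])
    # >>> c.shift(1)
    # [NaN, a, b]
    # Categories (3, object): [a, b, c]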
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
            # we need to ensure __array__ gets all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype.validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
# >=15.0 < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
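    # Illustrative use of ``value_counts`` (a minimal sketch; every category
    # gets an entry, and NaN is counted only when ``dropna=False``):
    #
    # >>> c = pd.Categorical(['a', 'b', 'a', np.nan], categories=['a', 'b', 'c'])
    # >>> c.value_counts()
    # a    2
    # b    1
    # c    0
    # dtype: int64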
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime and period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
def argsort(self, *args, **kwargs):
# TODO(PY2): use correct signature
        # We have to do *args, **kwargs to avoid a py2-only signature
# issue since np.argsort differs from argsort.
"""Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
"""
# Keep the implementation here just for the docstring.
return super(Categorical, self).argsort(*args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, dtype=self.dtype,
fastpath=True)
# MASKED: _values_for_rank function (lines 1636-1663)
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
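    # Illustrative use of ``fillna`` (a minimal sketch; the fill value must
    # already be one of the categories):
    #
    # >>> c = pd.Categorical(['a', np.nan, 'b'])
    # >>> c.fillna('a')
    # [a, a, b]
    # Categories (2, object): [a, b]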
def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
allow_fill : bool, default None.
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values
              (the default). These values are set to `fill_value`. Any other
              negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
if isna(fill_value):
# For categorical, any NA value is considered a user-facing
# NA value. Our storage NA value is -1.
fill_value = -1
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = self._constructor(codes, dtype=self.dtype, fastpath=True)
return result
take = take_nd
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
        # in a 2-d case be passed (slice(None),....)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
        # replace " < ... < " with " ... " to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
            If (one or more) value is not in categories or if an assigned
            `Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
            # in a 2-d case be passed (slice(None),....)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
# There is a bug in numpy, which does not accept a Series as a
# indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Example
-------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
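    # Illustrative use of ``min``/``max`` (a minimal sketch; both raise
    # TypeError on an unordered Categorical):
    #
    # >>> c = pd.Categorical(['b', 'a', 'c'], ordered=True)
    # >>> c.min(), c.max()
    # ('a', 'c')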
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
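    # Illustrative use of ``mode`` (a minimal sketch; the result keeps the
    # original dtype, so unused categories remain in the repr):
    #
    # >>> pd.Categorical(['a', 'b', 'a']).mode()
    # [a]
    # Categories (2, object): [a, b]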
def unique(self):
"""
        Return the ``Categorical`` whose ``categories`` and ``codes`` are
        unique. Unused categories are NOT returned.
        - unordered category: values and categories are sorted by appearance
          order.
        - ordered category: values are sorted by appearance order, categories
          keep the existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc'))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc'))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
def is_dtype_equal(self, other):
"""
Returns True if categoricals are the same dtype
same categories, and same ordered
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
def _formatting_values(self):
return self
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
@delegate_names(delegate=Categorical,
accessors=["categories", "ordered"],
typ="property")
@delegate_names(delegate=Categorical,
accessors=["rename_categories", "reorder_categories",
"add_categories", "remove_categories",
"remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"],
typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
    Be aware that assigning to `categories` is an inplace operation, while all
    methods return new categorical data by default (but can be called with
    `inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self.index = data.index
self.name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self._parent.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, 'values', values)
categories = getattr(categories, 'values', categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
def _recode_for_categories(codes, old_categories, new_categories):
"""
    Convert a set of codes to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
is_iterator(list_like)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# from ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
    # Implementation of the ``Categorical._values_for_rank`` method referenced
    # by the "MASKED" marker above (original file lines 1636-1663), reproduced
    # here as provided.
    def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
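    # Illustrative behaviour (a sketch, assuming an ordered input): for an
    # ordered Categorical ['b', 'a', NaN] with categories ['a' < 'b'], the
    # codes are [1, 0, -1], so this returns array([1., 0., nan]).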
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
# results depending whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if isinstance(other, ABCSeries):
return NotImplemented
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
# Two Categoricals can only be be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
# In other series, the leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# With cat[0], for example, being ``np.int64(1)`` by the time it gets
# into this function would become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :method:`__contains__`
and :class:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
cat : :class:`Categorical`or :class:`categoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
_codes_doc = """The category codes of this categorical.
Level codes are an array if integer which are the positions of the real
values in the categories array.
There is not setter, use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
`Categoricals` can only take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean, (default False)
Whether or not this categorical is treated as a ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
# For comparisons, so that numpy uses our implementation of the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
# Ways of specifying the dtype (prioritized ordered)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, dtype.categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
number of new categories is unequal to the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
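Examples
--------
A short sketch (assignment renames the existing categories in place):
>>> c = pd.Categorical(['a', 'b', 'a'])
>>> c.categories = ['x', 'y']
>>> c
[x, y, x]
Categories (2, object): [x, y]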
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _ndarray_values(self):
return self.codes
@property
def _constructor(self):
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
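Examples
--------
A small sketch (casting to a non-categorical dtype returns a numpy
array; values chosen for illustration):
>>> cat = pd.Categorical(['a', 'b'])
>>> cat.astype(object)
array(['a', 'b'], dtype=object)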
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
# Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
so do not need the (computationally intensive) factorization step, which is
usually done on the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered
categorical. If not given, the resulting categorical will be
unordered.
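Examples
--------
A minimal sketch (codes index into ``categories``; -1 marks NaN):
>>> pd.Categorical.from_codes([0, 1, -1, 0], categories=['a', 'b'])
[a, b, NaN, a]
Categories (2, object): [a, b]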
"""
codes = np.asarray(codes) # #21767
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
try:
codes = coerce_indexer_dtype(codes, categories)
except (ValueError, TypeError):
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype.validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
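Examples
--------
A minimal sketch (values chosen for illustration):
>>> c = pd.Categorical(['a', 'b'])
>>> c.as_ordered()
[a, b]
Categories (2, object): [a < b]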
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
set to NaN). If `rename==True`, the categories will simply be renamed
(fewer or more items than in old categories will result in values set to
NaN or in unused categories respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
On the other hand, this method does not do checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprising changes, for example when using special string
dtypes on python3, which do not consider an S1 string equal to a
single-char python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
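Examples
--------
An illustrative sketch (values chosen for illustration): dropping a
category sets its values to NaN, while ``rename=True`` relabels in place:
>>> c = pd.Categorical(['a', 'b', 'a'])
>>> c.set_categories(['b', 'c'])
[NaN, b, NaN]
Categories (2, object): [b, c]
>>> c.set_categories(['x', 'y'], rename=True)
[x, y, x]
Categories (2, object): [x, y]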
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
cat._codes[cat._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
items as the current categories or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
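Examples
--------
An illustrative sketch (the values are unchanged; only the category
order differs):
>>> c = pd.Categorical(['a', 'b', 'a'])
>>> c.reorder_categories(['b', 'a'], ordered=True)
[a, b, a]
Categories (2, object): [b < a]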
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
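Examples
--------
A minimal sketch (the new category is appended and initially unused):
>>> c = pd.Categorical(['a', 'b'])
>>> c.add_categories(['c'])
[a, b]
Categories (3, object): [a, b, c]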
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
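Examples
--------
An illustrative sketch (values in removed categories become NaN):
>>> c = pd.Categorical(['a', 'b', 'a'])
>>> c.remove_categories(['a'])
[NaN, b, NaN]
Categories (1, object): [b]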
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
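Examples
--------
A minimal sketch (values chosen for illustration):
>>> c = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
>>> c.remove_unused_categories()
[a, b]
Categories (2, object): [a, b]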
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
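Examples
--------
An illustrative sketch (vacated positions become NaN):
>>> c = pd.Categorical(['a', 'b', 'c'])
>>> c.shift(1)
[NaN, a, b]
Categories (3, object): [a, b, c]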
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
# we need to ensure __array__ get's all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype.validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
# >=15.0 < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
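Examples
--------
An illustrative sketch (every category gets an entry, even unused ones;
values chosen for illustration):
>>> c = pd.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c'])
>>> c.value_counts()
a    2
b    1
c    0
dtype: int64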
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime and period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
def argsort(self, *args, **kwargs):
# TODO(PY2): use correct signature
# We have to do *args, **kwargs to avoid a py2-only signature
# issue since np.argsort differs from argsort.
"""Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
"""
# Keep the implementation here just for the docstring.
return super(Categorical, self).argsort(*args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, dtype=self.dtype,
fastpath=True)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
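Examples
--------
A minimal sketch (``np`` assumed to be numpy; the fill value must
already be one of the categories):
>>> c = pd.Categorical(['a', np.nan, 'b'])
>>> c.fillna('a')
[a, a, b]
Categories (2, object): [a, b]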
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
allow_fill : bool, default None.
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values
(the default). These values are set to `fill_value`. Any other
other negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
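Examples
--------
A minimal sketch (with ``allow_fill=True``, -1 in the indexer marks a
missing value):
>>> c = pd.Categorical(['a', 'b', 'c'])
>>> c.take_nd([0, -1, 1], allow_fill=True)
[a, NaN, b]
Categories (3, object): [a, b, c]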
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
if isna(fill_value):
# For categorical, any NA value is considered a user-facing
# NA value. Our storage NA value is -1.
fill_value = -1
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = self._constructor(codes, dtype=self.dtype, fastpath=True)
return result
take = take_nd
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None),....)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
# replace " < ... < " with " ... " to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
If (one or more) value is not in categories or if an assigned
`Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None),....)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
# There is a bug in numpy, which does not accept a Series as an
# indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Example
-------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
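Examples
--------
A small sketch (only ordered categoricals have a minimum):
>>> pd.Categorical(['a', 'b', 'c'], ordered=True).min()
'a'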
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
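Examples
--------
An illustrative sketch (the result keeps the original categories):
>>> pd.Categorical(['a', 'b', 'b']).mode()
[b]
Categories (2, object): [a, b]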
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def unique(self):
"""
Return the ``Categorical`` whose ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order; categories
keep the existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc'))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc'))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
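Examples
--------
A small sketch (values chosen for illustration):
>>> c = pd.Categorical(['a', 'b'])
>>> c.equals(pd.Categorical(['a', 'b']))
True
>>> c.equals(pd.Categorical(['a', 'c']))
False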
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
def is_dtype_equal(self, other):
"""
Returns True if categoricals are of the same dtype:
same categories and same ordered attribute.
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
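Examples
--------
A formatting-agnostic sketch (checking the shape of the result rather
than its printed layout):
>>> desc = pd.Categorical(['a', 'a', 'b']).describe()
>>> list(desc.columns)
['counts', 'freqs']
>>> desc.index.name
'categories'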
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
def _formatting_values(self):
return self
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
@delegate_names(delegate=Categorical,
accessors=["categories", "ordered"],
typ="property")
@delegate_names(delegate=Categorical,
accessors=["rename_categories", "reorder_categories",
"add_categories", "remove_categories",
"remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"],
typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
Be aware that assigning to `categories` is an inplace operation, while all
methods return new categorical data by default (but can be called with
`inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self.index = data.index
self.name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self._parent.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, 'values', values)
categories = getattr(categories, 'values', categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
def _recode_for_categories(codes, old_categories, new_categories):
"""
Convert a set of codes to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
is_iterator(list_like)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# from ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
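# Illustrative sketch, not part of the original source: for a plain list,
# _factorize_from_iterable returns the integer codes and the inferred
# (sorted, if sortable) categories.
# >>> codes, categories = _factorize_from_iterable(['b', 'a', 'b'])
# >>> codes
# array([1, 0, 1], dtype=int8)
# >>> categories
# Index(['a', 'b'], dtype='object')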
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
|
_reverse_indexer
|
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Examples
--------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
|
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
# results depending whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if isinstance(other, ABCSeries):
return NotImplemented
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
# Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
# In other Series, this leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# With cat[0], for example, being ``np.int64(1)`` by the time it gets
# into this function would become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :method:`__contains__`
and :class:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
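# Illustrative sketch, not part of the original source: `contains` reports
# membership only when the *code* of `key` occurs in `container`, so a
# declared-but-unused category is not "contained" in the values.
# >>> c = pd.Categorical(['a'], categories=['a', 'b'])
# >>> contains(c, 'a', container=c._codes), contains(c, 'b', container=c._codes)
# (True, False)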
_codes_doc = """The category codes of this categorical.
Level codes are an array if integer which are the positions of the real
values in the categories array.
There is not setter, use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
`Categoricals` can take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
# For comparisons, so that numpy uses our implementation of the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
# Ways of specifying the dtype (prioritized ordered)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
number of new categories is unequal to the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _ndarray_values(self):
return self.codes
@property
def _constructor(self):
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
# Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
so do not need the (computationally intensive) factorization step, which is
usually done in the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered
categorical. If not given, the resulting categorical will be
unordered.
"""
codes = np.asarray(codes) # #21767
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
try:
codes = coerce_indexer_dtype(codes, categories)
except (ValueError, TypeError):
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype.validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
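# Illustrative sketch, not part of the original source: from_codes skips
# factorization entirely; a code of -1 stands for NaN.
# >>> pd.Categorical.from_codes([0, 1, -1, 0], categories=['a', 'b'])
# [a, b, NaN, a]
# Categories (2, object): [a, b]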
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
set to NaN). If `rename==True`, the categories will simply be renamed
(fewer or more items than in the old categories will result in values set to
NaN or in unused categories respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
On the other hand this method does not do checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprising changes, for example when using special string
dtypes on python3, which do not consider an S1 string equal to a
single-char python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, (default: False)
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
cat._codes[cat._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
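# Illustrative sketch, not part of the original source: with the default
# rename=False, set_categories recodes the values, so values missing from
# new_categories become NaN while extra categories simply stay unused.
# >>> c = pd.Categorical(['a', 'b', 'c'])
# >>> c.set_categories(['b', 'c', 'd'])
# [NaN, b, c]
# Categories (3, object): [b, c, d]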
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
items as the current categories or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
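# Illustrative sketch, not part of the original source: reorder_categories
# keeps the values but changes the category order, which matters once the
# categorical is ordered.
# >>> c = pd.Categorical(['a', 'b', 'c'])
# >>> c.reorder_categories(['c', 'b', 'a'], ordered=True)
# [a, b, c]
# Categories (3, object): [c < b < a]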
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
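# Illustrative sketch, not part of the original source: shifting fills the
# vacated positions with the missing-value code -1, i.e. NaN.
# >>> pd.Categorical(['a', 'b', 'c']).shift(1)
# [NaN, a, b]
# Categories (3, object): [a, b, c]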
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
# we need to ensure __array__ gets all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype.validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
# >=15.0 < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
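# Illustrative sketch, not part of the original source: searchsorted operates
# on the codes of an *ordered* categorical, so the insertion point follows the
# category order rather than lexical order.
# >>> c = pd.Categorical(['c', 'b', 'a'], categories=['c', 'b', 'a'],
# ...                    ordered=True)
# >>> c.searchsorted('b')
# array([1])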
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
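# Illustrative sketch, not part of the original source: every category gets a
# row in value_counts, including categories that never occur. Exact repr
# formatting may differ across pandas versions.
# >>> pd.Categorical(['a', 'a', 'c'], categories=['a', 'b', 'c']).value_counts()
# a    2
# b    0
# c    1
# dtype: int64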
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime and period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
def argsort(self, *args, **kwargs):
# TODO(PY2): use correct signature
# We have to do *args, **kwargs to avoid a py2-only signature
# issue since np.argsort differs from argsort.
"""Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
"""
# Keep the implementation here just for the docstring.
return super(Categorical, self).argsort(*args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, dtype=self.dtype,
fastpath=True)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
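# Illustrative sketch, not part of the original source: fillna only accepts
# fill values that are already categories (or NaN); anything else raises.
# >>> c = pd.Categorical(['a', None, 'b'], categories=['a', 'b'])
# >>> c.fillna('a')
# [a, a, b]
# Categories (2, object): [a, b]
# >>> c.fillna('z')  # raises ValueError: fill value must be in categories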
def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
allow_fill : bool, default None.
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values
(the default). These values are set to `fill_value`. Any other
negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
if isna(fill_value):
# For categorical, any NA value is considered a user-facing
# NA value. Our storage NA value is -1.
fill_value = -1
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = self._constructor(codes, dtype=self.dtype, fastpath=True)
return result
take = take_nd
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None), ...)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
# replace " < ... < " with " ... " to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
If (one or more) value is not in categories or if an assigned
`Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None), ...)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
# There is a bug in numpy, which does not accept a Series as an
# indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
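# Illustrative sketch of the validation above; ``cat`` is a hypothetical
# input and the expected outcomes are noted inline.
# >>> cat = pd.Categorical(['a', 'b', 'a'], categories=['a', 'b'])
# >>> cat[0] = 'b'       # fine: 'b' is an existing category
# >>> cat[1] = np.nan    # fine: missing values may always be assigned
# >>> cat[2] = 'c'       # ValueError: new category, set the categories first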
# MASKED: _reverse_indexer function (lines 2032-2069)
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
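# Illustrative sketch: ``min``/``max`` follow the category order of an
# ordered categorical, not lexical order; ``c`` is a hypothetical input.
# >>> c = pd.Categorical(['a', 'b', 'c'], categories=['c', 'b', 'a'],
# ...                    ordered=True)
# >>> c.min(), c.max()
# ('c', 'a')
# >>> pd.Categorical(['a', 'b']).min()   # unordered -> raises TypeError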
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
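# Illustrative sketch (assumes pandas >= 0.24 for the ``dropna`` argument):
# the most frequent category (or categories, on ties) comes back as a
# Categorical; with ``dropna=False`` missing values compete as well.
# >>> c = pd.Categorical(['a', 'a', 'b', np.nan, np.nan, np.nan])
# >>> c.mode()
# [a]
# Categories (2, object): [a, b]
# >>> c.mode(dropna=False)
# [NaN]
# Categories (2, object): [a, b]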
def unique(self):
"""
Return the ``Categorical`` whose ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order, categories
keep their existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc'))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc'))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
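# Illustrative sketch: two hypothetical unordered categoricals whose
# categories differ only in order still compare equal element-wise,
# thanks to the recoding branch above.
# >>> c1 = pd.Categorical(['a', 'b'], categories=['a', 'b'])
# >>> c2 = pd.Categorical(['a', 'b'], categories=['b', 'a'])
# >>> c1.equals(c2)
# True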
def is_dtype_equal(self, other):
"""
Returns True if categoricals are of the same dtype, i.e. have the
same categories and the same ordered attribute
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
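# Illustrative sketch (layout approximate): counts and relative frequencies
# per category for a hypothetical input.
# >>> pd.Categorical(['a', 'a', 'b']).describe()
#             counts     freqs
# categories
# a                2  0.666667
# b                1  0.333333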
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
def _formatting_values(self):
return self
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
@delegate_names(delegate=Categorical,
accessors=["categories", "ordered"],
typ="property")
@delegate_names(delegate=Categorical,
accessors=["rename_categories", "reorder_categories",
"add_categories", "remove_categories",
"remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"],
typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
Be aware that assigning to `categories` is an inplace operation, while all
methods return new categorical data by default (but can be called with
`inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self.index = data.index
self.name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self._parent.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, 'values', values)
categories = getattr(categories, 'values', categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
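# Illustrative sketch: values are mapped to positions in ``categories``,
# with -1 for anything missing or unknown; the result dtype depends on the
# number of categories (int8 here).
# >>> _get_codes_for_values(np.array(['b', 'a', 'x']), pd.Index(['a', 'b']))
# array([ 1,  0, -1], dtype=int8)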
def _recode_for_categories(codes, old_categories, new_categories):
"""
Convert a set of codes from one set of categories to a new set
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
is_iterator(list_like)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# from ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
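# Illustrative sketch: a plain list is factorized with sorted categories,
# whereas categorical input would keep its own categories and order.
# >>> codes, categories = _factorize_from_iterable(['b', 'a', 'b'])
# >>> codes
# array([1, 0, 1], dtype=int8)
# >>> categories
# Index(['a', 'b'], dtype='object')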
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
|
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Examples
--------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
| 2,032 | 2,069 |
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
# results depending whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if isinstance(other, ABCSeries):
return NotImplemented
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
# Two Categoricals can only be be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
# In other Series, this leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# With cat[0], for example, being ``np.int64(1)`` by the time it gets
# into this function would become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :meth:`__contains__`
and :meth:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
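# Illustrative sketch: membership requires both that ``key`` is a known
# category and that its code actually occurs in ``container`` (here the
# codes of a hypothetical categorical).
# >>> c = pd.Categorical(['a'], categories=['a', 'b'])
# >>> contains(c, 'a', c._codes)
# True
# >>> contains(c, 'b', c._codes)   # a category, but unused
# False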
_codes_doc = """The category codes of this categorical.
Level codes are an array of integers which are the positions of the real
values in the categories array.
There is no setter; use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
`Categoricals` can take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
# For comparisons, so that numpy uses our implementation if the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
# Ways of specifying the dtype (prioritized ordered)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
number of new categories is unequal to the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _ndarray_values(self):
return self.codes
@property
def _constructor(self):
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
# Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
so do not need the (computationally intensive) factorization step, which is
usually done in the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered
categorical. If not given, the resulting categorical will be
unordered.
"""
codes = np.asarray(codes) # #21767
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
try:
codes = coerce_indexer_dtype(codes, categories)
except (ValueError, TypeError):
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype.validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
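# Illustrative sketch: building directly from codes skips factorization;
# the sentinel -1 marks a missing value.
# >>> pd.Categorical.from_codes([0, 1, -1, 0], categories=['a', 'b'])
# [a, b, NaN, a]
# Categories (2, object): [a, b]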
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
set to NaN). If `rename==True`, the categories will simply be renamed
(fewer or more items than in the old categories will result in values set to
NaN or in unused categories respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
On the other hand, this method does not do checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprising changes, for example when using special string
dtypes on python3, which do not consider an S1 string equal to a
single-character python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
self._codes[self._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
items as the current categories or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as a ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
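# Illustrative sketch: new categories are appended at the end and start out
# unused; the values themselves are unchanged.
# >>> c = pd.Categorical(['a', 'b'])
# >>> c.add_categories(['c'])
# [a, b]
# Categories (3, object): [a, b, c]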
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
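# Illustrative sketch: categories no value points to are dropped and the
# codes are compacted accordingly.
# >>> c = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
# >>> c.remove_unused_categories()
# [a, b]
# Categories (2, object): [a, b]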
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
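# Illustrative sketch: shifting fills the vacated positions with missing
# values while keeping the categories.
# >>> c = pd.Categorical(['a', 'b', 'c'])
# >>> c.shift(1)
# [NaN, a, b]
# Categories (3, object): [a, b, c]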
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
# we need to ensure __array__ gets all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype.validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
# >=15.0 < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
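# Illustrative sketch: every category gets an entry, including ones that
# never occur in the values.
# >>> c = pd.Categorical(['a', 'a', 'c'], categories=['a', 'b', 'c'])
# >>> c.value_counts()
# a    2
# b    0
# c    1
# dtype: int64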
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime and period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
def argsort(self, *args, **kwargs):
# TODO(PY2): use correct signature
# We have to do *args, **kwargs to avoid a py2-only signature
# issue since np.argsort differs from argsort.
"""Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
"""
# Keep the implementation here just for the docstring.
return super(Categorical, self).argsort(*args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, dtype=self.dtype,
fastpath=True)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
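        Examples
        --------
        Illustrative sketch (representative output):
        >>> cat = pd.Categorical(['a', 'b', np.nan], categories=['a', 'b'])
        >>> cat.fillna('a')
        [a, b, a]
        Categories (2, object): [a, b]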
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
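            # Sketch of the approach: render the categorical as a dense
            # 1 x N value array so interpolate_2d can forward/backward fill
            # along it, then translate the filled values back into codes
            # against the existing categories.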
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
allow_fill : bool, default None.
How to handle negative values in `indexer`.
            * False: negative values in `indexer` indicate positional indices
              from the right. This is similar to
              :func:`numpy.take`.
            * True: negative values in `indexer` indicate missing values
              (the default). These values are set to `fill_value`. Any
              other negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
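        Examples
        --------
        Illustrative sketch of the two ``allow_fill`` modes (representative
        output):
        >>> cat = pd.Categorical(['a', 'b', 'c'])
        >>> cat.take([0, -1], allow_fill=True)
        [a, NaN]
        Categories (3, object): [a, b, c]
        >>> cat.take([0, -1], allow_fill=False)
        [a, c]
        Categories (3, object): [a, b, c]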
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
if isna(fill_value):
# For categorical, any NA value is considered a user-facing
# NA value. Our storage NA value is -1.
fill_value = -1
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = self._constructor(codes, dtype=self.dtype, fastpath=True)
return result
take = take_nd
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
        # only allow 1 dimensional slicing, but can
        # in a 2-d case be passed (slice(None), ...)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
        # replace " < ... < " with " ... " to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
            If (one or more) value is not in categories or if an assigned
            `Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
            # in a 2-d case be passed (slice(None), ...)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
            # There is a bug in numpy, which does not accept a Series as an
            # indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
        Examples
        --------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
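        # groupsort_indexer returns the positions of the original values
        # grouped by code ('r') together with the size of each group
        # ('counts'); the cumulative sums below give the slice boundaries
        # used to split 'r' into one position array per category.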
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
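        Examples
        --------
        Illustrative (representative output):
        >>> pd.Categorical(['a', 'b', 'c'], ordered=True).min()
        'a'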
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
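        Examples
        --------
        Illustrative (representative output):
        >>> pd.Categorical(['a', 'b', 'c'], ordered=True).max()
        'c'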
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
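        Examples
        --------
        Illustrative (representative output; the result keeps the full set
        of categories):
        >>> pd.Categorical([1, 2, 2, 3]).mode()
        [2]
        Categories (3, int64): [1, 2, 3]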
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def unique(self):
"""
        Return the ``Categorical`` whose ``categories`` and ``codes`` are
        unique. Unused categories are NOT returned.
        - unordered category: values and categories are sorted by appearance
          order.
        - ordered category: values are sorted by appearance order, categories
          keep the existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc'))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc'))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
def is_dtype_equal(self, other):
"""
        Returns True if the categoricals have the same dtype:
        same categories and same ordered attribute.
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
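        Examples
        --------
        Illustrative (representative output):
        >>> pd.Categorical(['a', 'b']).repeat(2)
        [a, a, b, b]
        Categories (2, object): [a, b]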
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
def _formatting_values(self):
return self
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
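        # The membership test runs on the integer codes: map each candidate
        # value to its code via the categories (values that are not
        # categories map to -1 and are dropped, except for NaN, whose -1 is
        # kept so it matches missing entries), then compare against
        # self.codes.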
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
@delegate_names(delegate=Categorical,
accessors=["categories", "ordered"],
typ="property")
@delegate_names(delegate=Categorical,
accessors=["rename_categories", "reorder_categories",
"add_categories", "remove_categories",
"remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"],
typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
    Be aware that assigning to `categories` is an inplace operation, while all
    methods return new categorical data by default (but can be called with
    `inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self.index = data.index
self.name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self._parent.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, 'values', values)
categories = getattr(categories, 'values', categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
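    # Build a hash table keyed by the category values and look up the
    # position of every element of 'values' in it; elements that are not
    # present come back as -1, the missing-value sentinel for codes.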
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
def _recode_for_categories(codes, old_categories, new_categories):
"""
    Convert a set of codes for `old_categories` to codes for `new_categories`
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
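    # 'indexer' maps each old code (position in old_categories) to the
    # corresponding new code (position in new_categories, or -1 if the old
    # category is absent there); take_1d then applies that mapping to every
    # element, preserving -1 for missing values.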
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
is_iterator(list_like)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
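    Examples
    --------
    Illustrative (representative output):
    >>> codes, categories = _factorize_from_iterable(['b', 'a', 'b'])
    >>> codes
    array([1, 0, 1], dtype=int8)
    >>> categories
    Index(['a', 'b'], dtype='object')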
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# from ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
# results depending whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if isinstance(other, ABCSeries):
return NotImplemented
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
# Two Categoricals can only be be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
# In other series, the leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# With cat[0], for example, being ``np.int64(1)`` by the time it gets
# into this function would become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :method:`__contains__`
and :class:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
cat : :class:`Categorical`or :class:`categoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
_codes_doc = """The category codes of this categorical.
Level codes are an array if integer which are the positions of the real
values in the categories array.
There is not setter, use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
`Categoricals` can only take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean, (default False)
Whether or not this categorical is treated as a ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
# For comparisons, so that numpy uses our implementation if the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
# Ways of specifying the dtype (prioritized ordered)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
Assigning to `categories` is a inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
number of new categories is unequal the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _ndarray_values(self):
return self.codes
@property
def _constructor(self):
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
# Convert to a specialzed type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
so do not need the (computation intensive) factorization step, which is
usually done on the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
Whether or not this categorical is treated as a ordered
categorical. If not given, the resulting categorical will be
unordered.
"""
codes = np.asarray(codes) # #21767
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
try:
codes = coerce_indexer_dtype(codes, categories)
except (ValueError, TypeError):
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype.validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
set to NaN). If `rename==True`, the categories will simple be renamed
(less or more items than in old categories will result in values set to
NaN or in unused categories respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
On the other hand this methods does not do checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprising changes, for example when using special string
dtypes on python3, which does not considers a S1 string equal to a
single char python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, (default: False)
Whether or not the categorical is treated as a ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
self._codes[self._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
items than the current categories or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as a ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
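            # np.roll wraps values around the end, so the wrapped-in slots are
            # reset to -1 (NaN) depending on the shift direction.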
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
            # we need to ensure __array__ gets all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype.validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
            # >= 0.15.0, < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
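        # Searching happens on the integer codes, so the values to insert are
        # converted to codes first; a -1 marks a value that is not a known
        # category and therefore cannot be positioned.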
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
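        # 'clean' means there are no -1 (NaN) codes. bincount over the codes
        # yields one count per category; when NaN counts are requested, -1
        # codes are remapped to an extra bucket at position ncat and a -1
        # entry is appended to the index so it renders as NaN.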
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime and period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
def argsort(self, *args, **kwargs):
# TODO(PY2): use correct signature
        # We have to do *args, **kwargs to avoid a py2-only signature
# issue since np.argsort differs from argsort.
"""Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
"""
# Keep the implementation here just for the docstring.
return super(Categorical, self).argsort(*args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
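        # Sorting operates on the integer codes (i.e. by category position);
        # the -1 (NaN) codes are then moved to the front or back below
        # according to na_position.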
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, dtype=self.dtype,
fastpath=True)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
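            # rank ordered categoricals on their integer codes, with the -1
            # (NaN) codes converted to real NaN so they rank as missing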
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
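            # interpolate_2d expects 2-d input, so the dense values are
            # reshaped into a single row, forward/back filled, and then
            # re-encoded into codes against the existing categories.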
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
allow_fill : bool, default None.
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values
              (the default). These values are set to `fill_value`. Any other
              negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
if isna(fill_value):
# For categorical, any NA value is considered a user-facing
# NA value. Our storage NA value is -1.
fill_value = -1
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = self._constructor(codes, dtype=self.dtype, fastpath=True)
return result
take = take_nd
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
        # in a 2-d case be passed (slice(None), ...)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
        # replace " < ... < " with " ... " simply to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
            If (one or more) value is not in categories or if an assigned
            `Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
            # in a 2-d case be passed (slice(None), ...)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
# There is a bug in numpy, which does not accept a Series as a
# indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
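        # Translate the assigned values into positions within the categories
        # and write those integer codes into the selected slots.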
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Example
-------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
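        # groupsort_indexer returns the positions grouped by code together
        # with each group's size (the NaN group first); the cumulative counts
        # mark the slice boundaries used to collect the positions belonging
        # to each category.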
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
# MASKED: min function (lines 2079-2102)
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def unique(self):
"""
        Return the ``Categorical`` whose ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
        - ordered category: values are sorted by appearance order, categories
          keep their existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
        >>> pd.Categorical(list('baabc')).unique()
[b, a, c]
Categories (3, object): [b, a, c]
        >>> pd.Categorical(list('baabc'), categories=list('abc')).unique()
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
        >>> pd.Categorical(list('baabc'),
        ...                categories=list('abc'),
        ...                ordered=True).unique()
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
def is_dtype_equal(self, other):
"""
        Returns True if categoricals are the same dtype:
        same categories and same ordered attribute.
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
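        # Frequencies are the counts normalised by the total number of values;
        # dropna=False above keeps a NaN row when missing values are present.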
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
def _formatting_values(self):
return self
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
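        # Map the candidate values onto this Categorical's category positions;
        # keep codes for values that are NaN or were actually found, then test
        # membership on the integer codes rather than on the values.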
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
@delegate_names(delegate=Categorical,
accessors=["categories", "ordered"],
typ="property")
@delegate_names(delegate=Categorical,
accessors=["rename_categories", "reorder_categories",
"add_categories", "remove_categories",
"remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"],
typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
    Be aware that assigning to `categories` is an inplace operation, while all
    methods return new categorical data by default (but can be called with
`inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self.index = data.index
self.name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self._parent.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, 'values', values)
categories = getattr(categories, 'values', categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
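    # Build a hash table keyed by the categories; looking the values up in it
    # yields their positional codes, with -1 for values that are not among
    # the categories.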
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
def _recode_for_categories(codes, old_categories, new_categories):
"""
    Convert a set of codes to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
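    # indexer maps each old category position to its position within
    # new_categories (-1 if the category was dropped); taking it with the old
    # codes recodes every value, and fill_value=-1 keeps NaN codes as NaN.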
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
is_iterator(list_like)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# from ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
|
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
"""
self.check_for_ordered('min')
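        # The minimum is taken over the integer codes; unless numeric_only
        # filters them out, -1 (NaN) codes are the smallest, so a pointer of
        # -1 means the result is NaN.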
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
| 2,079 | 2,102 |
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
# results depending whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if isinstance(other, ABCSeries):
return NotImplemented
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
            # Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
                # In other Series, this leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# With cat[0], for example, being ``np.int64(1)`` by the time it gets
# into this function would become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :method:`__contains__`
and :class:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
    cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
_codes_doc = """The category codes of this categorical.
Level codes are an array of integers which are the positions of the real
values in the categories array.
There is no setter; use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
    `Categoricals` can only take on a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean, (default False)
        Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
    # For comparisons, so that numpy uses our implementation of the compare
    # ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
        # Ways of specifying the dtype (in order of priority)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
        Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
            number of new categories is unequal to the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _ndarray_values(self):
return self.codes
@property
def _constructor(self):
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
            # Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
        so do not need the (computationally intensive) factorization step,
        which is usually done in the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
            Whether or not this categorical is treated as an ordered
categorical. If not given, the resulting categorical will be
unordered.
"""
codes = np.asarray(codes) # #21767
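        # Float codes were historically accepted; keep accepting them when
        # they are exact integers, but warn that this will become an error.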
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
try:
codes = coerce_indexer_dtype(codes, categories)
except (ValueError, TypeError):
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype.validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
        set to NaN). If `rename==True`, the categories will simply be renamed
(less or more items than in old categories will result in values set to
NaN or in unused categories respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
        On the other hand this method does not do checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprising changes, for example when using special string
        dtypes on python3, which does not consider an S1 string equal to a
        single-char python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
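Examples
--------
A minimal sketch of the behaviour described above (repr as rendered by
this pandas version); values outside the new categories become NaN:
>>> c = pd.Categorical(['a', 'b', 'a'])
>>> c.set_categories(['b', 'c'])
[NaN, b, NaN]
Categories (2, object): [b, c]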
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
cat._codes[cat._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
items as the current categories or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` needs to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or if
they contain any new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
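Examples
--------
A minimal sketch; the values are unchanged, only the category order moves:
>>> c = pd.Categorical(['a', 'b', 'a'])
>>> c.reorder_categories(['b', 'a'])
[a, b, a]
Categories (2, object): [b, a]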
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
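Examples
--------
A minimal sketch; the added category is unused until values are assigned
to it:
>>> c = pd.Categorical(['a', 'b'])
>>> c.add_categories(['c'])
[a, b]
Categories (3, object): [a, b, c]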
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
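Examples
--------
A minimal sketch; values in removed categories become NaN:
>>> c = pd.Categorical(['a', 'b', 'a'])
>>> c.remove_categories(['a'])
[NaN, b, NaN]
Categories (1, object): [b]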
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
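Examples
--------
A minimal sketch of dropping a category that carries no values:
>>> c = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
>>> c.remove_unused_categories()
[a, b]
Categories (2, object): [a, b]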
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
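Examples
--------
A minimal sketch; vacated positions are filled with NaN:
>>> pd.Categorical(['a', 'b', 'c']).shift(1)
[NaN, a, b]
Categories (3, object): [a, b, c]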
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
# we need to ensure __array__ gets all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype.validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
# >= 0.15.0 < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
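Examples
--------
A minimal sketch; note that the unused category appears with a count of 0:
>>> c = pd.Categorical(['a', 'a', 'b'], categories=['a', 'b', 'c'])
>>> c.value_counts()
a    2
b    1
c    0
dtype: int64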
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime and period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
def argsort(self, *args, **kwargs):
# TODO(PY2): use correct signature
# We have to do *args, **kwargs to avoid a py2-only signature
# issue since np.argsort differs from argsort.
"""Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
"""
# Keep the implementation here just for the docstring.
return super(Categorical, self).argsort(*args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, dtype=self.dtype,
fastpath=True)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
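Examples
--------
A minimal sketch; the fill value has to be an existing category:
>>> c = pd.Categorical(['a', np.nan, 'b'])
>>> c.fillna('a')
[a, a, b]
Categories (2, object): [a, b]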
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
allow_fill : bool, default None.
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values
(the default). These values are set to `fill_value`. Any other
negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
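Examples
--------
A minimal sketch with ``allow_fill=True``, where ``-1`` marks a missing
slot in the result:
>>> c = pd.Categorical(['a', 'b', 'c'])
>>> c.take([0, -1, 1], allow_fill=True)
[a, NaN, b]
Categories (3, object): [a, b, c]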
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
if isna(fill_value):
# For categorical, any NA value is considered a user-facing
# NA value. Our storage NA value is -1.
fill_value = -1
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = self._constructor(codes, dtype=self.dtype, fastpath=True)
return result
take = take_nd
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None),....)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
# replace " < ... < " with " ... " to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
If (one or more) value is not in categories or if an assigned
`Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None),....)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
# There is a bug in numpy, which does not accept a Series as an
# indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Example
-------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
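Examples
--------
A minimal sketch; an unordered Categorical would raise ``TypeError``
instead:
>>> pd.Categorical(['a', 'b', 'c'], ordered=True).min()
'a'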
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
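Examples
--------
A minimal sketch:
>>> pd.Categorical(['a', 'a', 'b']).mode()
[a]
Categories (2, object): [a, b]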
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def unique(self):
"""
Return the ``Categorical`` whose ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order, categories
keep existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc')).unique()
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc')).unique()
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
...                categories=list('abc'),
...                ordered=True).unique()
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
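Examples
--------
A minimal sketch; for unordered categoricals the category order does not
matter:
>>> c1 = pd.Categorical(['a', 'b'], categories=['a', 'b'])
>>> c2 = pd.Categorical(['a', 'b'], categories=['b', 'a'])
>>> c1.equals(c2)
True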
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
def is_dtype_equal(self, other):
"""
Returns True if categoricals are the same dtype:
same categories and same ordered
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
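Examples
--------
A minimal sketch:
>>> pd.Categorical(['a', 'b']).repeat(2)
[a, a, b, b]
Categories (2, object): [a, b]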
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
def _formatting_values(self):
return self
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
@delegate_names(delegate=Categorical,
accessors=["categories", "ordered"],
typ="property")
@delegate_names(delegate=Categorical,
accessors=["rename_categories", "reorder_categories",
"add_categories", "remove_categories",
"remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"],
typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
Be aware that assigning to `categories` is an inplace operation, while all
methods return new categorical data per default (but can be called with
`inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self.index = data.index
self.name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self._parent.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, 'values', values)
categories = getattr(categories, 'values', categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
def _recode_for_categories(codes, old_categories, new_categories):
"""
Convert a set of codes to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
is_iterator(list_like)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# from ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
|
max
|
The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
|
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
# results depending whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if isinstance(other, ABCSeries):
return NotImplemented
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
# Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
# In other Series, this leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# With cat[0], for example, being ``np.int64(1)`` by the time it gets
# into this function would become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :method:`__contains__`
and :class:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
_codes_doc = """The category codes of this categorical.
Level codes are an array of integers which are the positions of the real
values in the categories array.
There is no setter, use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
`Categoricals` can take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
# For comparisons, so that numpy uses our implementation of the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
# Ways of specifying the dtype (prioritized ordered)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
number of new categories does not equal the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _ndarray_values(self):
return self.codes
@property
def _constructor(self):
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
# Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
so do not need the (computation intensive) factorization step, which is
usually done on the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, default False
Whether or not this categorical is treated as an ordered
categorical. If not given, the resulting categorical will be
unordered.
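Examples
--------
A small illustrative example; ``-1`` codes become missing values:
>>> pd.Categorical.from_codes([0, 1, -1, 1], categories=['a', 'b'])
[a, b, NaN, b]
Categories (2, object): [a, b]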
"""
codes = np.asarray(codes) # #21767
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
try:
codes = coerce_indexer_dtype(codes, categories)
except (ValueError, TypeError):
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype.validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
set to NaN). If `rename==True`, the categories will simply be renamed
(fewer or more items than in the old categories will result in values set to
NaN or in unused categories, respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
On the other hand, this method does not run checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprising changes, for example when using special string
dtypes on python3, which does not consider an S1 string equal to a
single-character python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
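Examples
--------
An illustrative example; dropping a category sets the corresponding
values to NaN:
>>> c = pd.Categorical(['a', 'b', 'a'])
>>> c.set_categories(['b', 'a', 'c'])
[a, b, a]
Categories (3, object): [b, a, c]
>>> c.set_categories(['a'])
[a, NaN, a]
Categories (1, object): [a]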
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
cat._codes[cat._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
items as the current categories, or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` needs to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
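Examples
--------
For example, reordering changes the categories but not the values:
>>> c = pd.Categorical(['a', 'b', 'a'])
>>> c.reorder_categories(['b', 'a'])
[a, b, a]
Categories (2, object): [b, a]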
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
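Examples
--------
The added category is present but unused, for example:
>>> c = pd.Categorical(['a', 'b'])
>>> c.add_categories(['c'])
[a, b]
Categories (3, object): [a, b, c]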
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
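Examples
--------
Values in a removed category become NaN, for example:
>>> c = pd.Categorical(['a', 'b', 'a'])
>>> c.remove_categories(['a'])
[NaN, b, NaN]
Categories (1, object): [b]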
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
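Examples
--------
Here the unused category ``'c'`` is dropped:
>>> c = pd.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c'])
>>> c.remove_unused_categories()
[a, b, a]
Categories (2, object): [a, b]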
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
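Examples
--------
Positions vacated by the shift are filled with NaN, for example:
>>> pd.Categorical(['a', 'b', 'c']).shift(1)
[NaN, a, b]
Categories (3, object): [a, b, c]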
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
# we need to ensure __array__ gets all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype.validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
# >= 0.15.0, < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
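Examples
--------
Unused categories are counted as 0, for example:
>>> pd.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c']).value_counts()
a    2
b    1
c    0
dtype: int64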
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime and period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
def argsort(self, *args, **kwargs):
# TODO(PY2): use correct signature
# We have to do *args, **kwargs to avoid a py2-only signature
# issue since np.argsort differs from argsort.
"""Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
"""
# Keep the implementation here just for the docstring.
return super(Categorical, self).argsort(*args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, dtype=self.dtype,
fastpath=True)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
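Examples
--------
An illustrative fill with a value that is already a category:
>>> c = pd.Categorical(['a', None, 'b'])
>>> c.fillna('a')
[a, a, b]
Categories (2, object): [a, b]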
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
allow_fill : bool, default None.
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values
(the default). These values are set to `fill_value`. Any other
negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
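Examples
--------
With ``allow_fill=True``, a ``-1`` in the indexer marks a missing value:
>>> c = pd.Categorical(['a', 'b', 'c'])
>>> c.take([0, -1], allow_fill=True)
[a, NaN]
Categories (3, object): [a, b, c]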
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
if isna(fill_value):
# For categorical, any NA value is considered a user-facing
# NA value. Our storage NA value is -1.
fill_value = -1
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = self._constructor(codes, dtype=self.dtype, fastpath=True)
return result
take = take_nd
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None),....)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
# replace ' < ... < ' with ' ... ' to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
If (one or more) value is not in categories or if an assigned
`Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None),....)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
# There is a bug in numpy, which does not accept a Series as an
# indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Example
-------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
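Examples
--------
For example (the categories are inferred in sorted order here):
>>> pd.Categorical(['b', 'a', 'c'], ordered=True).min()
'a'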
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
        """ The maximum value of the object.
        Only ordered `Categoricals` have a maximum!
        Raises
        ------
        TypeError
            If the `Categorical` is not `ordered`.
        Returns
        -------
        max : the maximum of this `Categorical`
        """
        self.check_for_ordered('max')
        if numeric_only:
            good = self._codes != -1
            pointer = self._codes[good].max(**kwargs)
        else:
            pointer = self._codes.max(**kwargs)
        if pointer == -1:
            return np.nan
        else:
            return self.categories[pointer]
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
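Examples
--------
The original categories are kept in the result, for example:
>>> pd.Categorical(['a', 'a', 'b']).mode()
[a]
Categories (2, object): [a, b]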
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def unique(self):
"""
Return the ``Categorical`` whose ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order, categories
keep their existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc')).unique()
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc')).unique()
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
...                categories=list('abc'),
...                ordered=True).unique()
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
def is_dtype_equal(self, other):
"""
Returns True if categoricals are the same dtype
same categories, and same ordered
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
def _formatting_values(self):
return self
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
@delegate_names(delegate=Categorical,
accessors=["categories", "ordered"],
typ="property")
@delegate_names(delegate=Categorical,
accessors=["rename_categories", "reorder_categories",
"add_categories", "remove_categories",
"remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"],
typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
Be aware that assigning to `categories` is an inplace operation, while all
methods return new categorical data by default (but can be called with
`inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self.index = data.index
self.name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self._parent.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, 'values', values)
categories = getattr(categories, 'values', categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
def _recode_for_categories(codes, old_categories, new_categories):
"""
Convert a set of codes to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
is_iterator(list_like)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# from ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
# results depending on whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if isinstance(other, ABCSeries):
return NotImplemented
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
# Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
# In other Series, this leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# With cat[0], for example, being ``np.int64(1)`` by the time it gets
# into this function would become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :meth:`__contains__`
and :meth:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
_codes_doc = """The category codes of this categorical.
The codes are an array of integers which are the positions of the real
values in the categories array.
There is no setter; use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
`Categoricals` can take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
# For comparisons, so that numpy uses our implementation of the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
# Ways of specifying the dtype (prioritized ordered)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
number of new categories is unequal to the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _ndarray_values(self):
return self.codes
@property
def _constructor(self):
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
# Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
so do not need the (computationally intensive) factorization step, which is
usually done in the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered
categorical. If not given, the resulting categorical will be
unordered.
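Examples
--------
A minimal, illustrative sketch (output shows the default repr;
a code of ``-1`` marks a missing value):
>>> pd.Categorical.from_codes([0, 1, 0, -1], categories=['a', 'b'])
[a, b, a, NaN]
Categories (2, object): [a, b]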
"""
codes = np.asarray(codes) # #21767
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
try:
codes = coerce_indexer_dtype(codes, categories)
except (ValueError, TypeError):
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype.validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
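Examples
--------
Illustrative sketch (a copy is returned because ``inplace=False``):
>>> pd.Categorical(['a', 'b']).as_ordered()
[a, b]
Categories (2, object): [a < b]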
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
set to NaN). If `rename==True`, the categories will simply be renamed
(less or more items than in old categories will result in values set to
NaN or in unused categories respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
On the other hand, this method does not do checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprising changes, for example when using special string
dtypes on python3, which does not consider an S1 string equal to a
single-char python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
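Examples
--------
An illustrative sketch of the effects described above; ``c`` is a
throwaway name and the output shows the default repr:
>>> c = pd.Categorical(['a', 'b', 'a'])
>>> c.set_categories(['b', 'a', 'c'])
[a, b, a]
Categories (3, object): [b, a, c]
>>> c.set_categories(['a'])
[a, NaN, a]
Categories (1, object): [a]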
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
self._codes[self._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
items as the current categories or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list-like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
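Examples
--------
A small, illustrative sketch; reordering changes only the dtype, not
the values:
>>> c = pd.Categorical(['a', 'b', 'a'])
>>> c.reorder_categories(['b', 'a'], ordered=True)
[a, b, a]
Categories (2, object): [b < a]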
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
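Examples
--------
Illustrative sketch; the new category is appended and initially unused:
>>> c = pd.Categorical(['a', 'b'])
>>> c.add_categories(['c'])
[a, b]
Categories (3, object): [a, b, c]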
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
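Examples
--------
Illustrative sketch; values in the removed category become NaN:
>>> c = pd.Categorical(['a', 'b', 'a', 'c'])
>>> c.remove_categories(['c'])
[a, b, a, NaN]
Categories (2, object): [a, b]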
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
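Examples
--------
Illustrative sketch; 'c' never occurs in the values and is dropped:
>>> c = pd.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c'])
>>> c.remove_unused_categories()
[a, b, a]
Categories (2, object): [a, b]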
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
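Examples
--------
Illustrative sketch; vacated positions become NaN (code -1):
>>> c = pd.Categorical(['a', 'b', 'c'])
>>> c.shift(1)
[NaN, a, b]
Categories (3, object): [a, b, c]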
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
# we need to ensure __array__ gets all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype.validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
# >=15.0 < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
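Examples
--------
Illustrative sketch; every category appears, in category order, even
with a count of 0 (exact alignment of the Series repr may vary):
>>> c = pd.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c'])
>>> c.value_counts()
a    2
b    1
c    0
dtype: int64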
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime and period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
def argsort(self, *args, **kwargs):
# TODO(PY2): use correct signature
# We have to do *args, **kwargs to avoid a py2-only signature
# issue since np.argsort differs from argsort.
"""Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
"""
# Keep the implementation here just for the docstring.
return super(Categorical, self).argsort(*args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, dtype=self.dtype,
fastpath=True)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
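Examples
--------
Illustrative sketch with a scalar fill value, which must already be
one of the categories:
>>> c = pd.Categorical(['a', np.nan, 'b'])
>>> c.fillna('a')
[a, a, b]
Categories (2, object): [a, b]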
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
allow_fill : bool, default None.
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values
(the default). These values are set to `fill_value`. Any other
negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
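Examples
--------
Illustrative sketch; with ``allow_fill=True`` a -1 in the indexer is
treated as missing and filled with NaN:
>>> c = pd.Categorical(['a', 'b', 'c'])
>>> c.take([0, 2, -1], allow_fill=True)
[a, c, NaN]
Categories (3, object): [a, b, c]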
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
if isna(fill_value):
# For categorical, any NA value is considered a user-facing
# NA value. Our storage NA value is -1.
fill_value = -1
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = self._constructor(codes, dtype=self.dtype, fastpath=True)
return result
take = take_nd
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None), ...)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
# replace ' < ... < ' with ' ... ' to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
If (one or more) value is not in categories or if an assigned
`Categorical` does not have the same categories
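Examples
--------
Illustrative sketch; only existing categories may be assigned:
>>> c = pd.Categorical(['a', 'b', 'a'])
>>> c[0] = 'b'
>>> c
[b, b, a]
Categories (2, object): [a, b]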
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None), ...)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
# There is a bug in numpy, which does not accept a Series as an
# indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Examples
--------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
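Examples
--------
Illustrative sketch; requires an ordered Categorical:
>>> pd.Categorical(['a', 'b', 'c'], ordered=True).min()
'a'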
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
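Examples
--------
Illustrative sketch; requires an ordered Categorical:
>>> pd.Categorical(['a', 'b', 'c'], ordered=True).max()
'c'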
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
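Examples
--------
Illustrative sketch; the result keeps all original categories in its
dtype even though only the modal value(s) appear:
>>> pd.Categorical(['a', 'a', 'b']).mode()
[a]
Categories (2, object): [a, b]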
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def unique(self):
"""
Return the ``Categorical`` whose ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order; categories
keep their existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc'))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc'))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
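    # Illustrative sketch (not in the original file): ``equals`` compares the
    # values element-wise, re-coding when the categories merely differ in
    # order. The example below is an assumed, typical outcome.
    #
    #   >>> c1 = pd.Categorical(['a', 'b'], categories=['a', 'b'])
    #   >>> c2 = pd.Categorical(['a', 'b'], categories=['b', 'a'])
    #   >>> c1.equals(c2)
    #   True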
def is_dtype_equal(self, other):
"""
Returns True if categoricals are the same dtype
same categories, and same ordered
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
            A DataFrame with the counts and frequencies of each category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
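    # Illustrative sketch (assumed output shape, not part of the original
    # source): one row per category, with absolute and relative frequencies.
    #
    #   >>> pd.Categorical(['a', 'a', 'b']).describe()
    #               counts     freqs
    #   categories
    #   a                2  0.666667
    #   b                1  0.333333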
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
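    # Illustrative sketch (assumption, mirroring numpy.repeat semantics):
    #
    #   >>> pd.Categorical(['a', 'b']).repeat(2)
    #   [a, a, b, b]
    #   Categories (2, object): [a, b]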
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
def _formatting_values(self):
return self
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
@delegate_names(delegate=Categorical,
accessors=["categories", "ordered"],
typ="property")
@delegate_names(delegate=Categorical,
accessors=["rename_categories", "reorder_categories",
"add_categories", "remove_categories",
"remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"],
typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
    Be aware that assigning to `categories` is an inplace operation, while all
    methods return new categorical data by default (but can be called with
`inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self.index = data.index
self.name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self._parent.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, 'values', values)
categories = getattr(categories, 'values', categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
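# Illustrative sketch of the helper above (comment only; the inputs and the
# exact output dtype are assumptions). Values not found in `categories` map
# to the sentinel -1:
#
#   >>> _get_codes_for_values(np.array(['a', 'b', 'z'], dtype=object),
#   ...                       pd.Index(['a', 'b']))
#   array([ 0,  1, -1], dtype=int8)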
def _recode_for_categories(codes, old_categories, new_categories):
"""
    Convert a set of codes to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
is_iterator(list_like)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# from ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
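# Illustrative sketch (not in the original source; output is an assumption):
# for a plain list the categories are the sorted unique values and the codes
# point into them.
#
#   >>> codes, categories = _factorize_from_iterable(['b', 'a', 'b'])
#   >>> codes
#   array([1, 0, 1], dtype=int8)
#   >>> categories
#   Index(['a', 'b'], dtype='object')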
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
|
mode
|
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
|
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
# results depending whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if isinstance(other, ABCSeries):
return NotImplemented
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
            # Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
                # In other Series, this leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# With cat[0], for example, being ``np.int64(1)`` by the time it gets
# into this function would become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
    This is a helper method for :meth:`__contains__`
and :class:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
    cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
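# Illustrative sketch of ``contains`` (comment only; data below is assumed):
# 'b' is a category of ``cat`` but never occurs in its codes, so membership
# against the codes container is False.
#
#   >>> cat = Categorical(['a'], categories=['a', 'b'])
#   >>> contains(cat, 'b', container=cat._codes)
#   False
#   >>> contains(cat, 'a', container=cat._codes)
#   True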
_codes_doc = """The category codes of this categorical.
Level codes are an array of integers which are the positions of the real
values in the categories array.
There is no setter; use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
    `Categoricals` can only take on a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean, (default False)
        Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
    # For comparisons, so that numpy uses our implementation of the compare
    # ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
# Ways of specifying the dtype (prioritized ordered)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
        Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
            number of new categories is unequal to the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _ndarray_values(self):
return self.codes
@property
def _constructor(self):
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
            # Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
so do not need the (computation intensive) factorization step, which is
usually done on the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
            Whether or not this categorical is treated as an ordered
categorical. If not given, the resulting categorical will be
unordered.
"""
codes = np.asarray(codes) # #21767
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
try:
codes = coerce_indexer_dtype(codes, categories)
except (ValueError, TypeError):
            raise ValueError(
                "codes need to be convertible to an array of integers")
categories = CategoricalDtype.validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
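    # Illustrative sketch (assumed example, not part of the original file):
    # -1 codes become NaN and no factorization is performed.
    #
    #   >>> pd.Categorical.from_codes([0, 1, -1, 0], categories=['a', 'b'])
    #   [a, b, NaN, a]
    #   Categories (2, object): [a, b]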
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
            raise ValueError("new categories need to have the same number of "
                             "items as the old categories!")
self._dtype = new_dtype
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
        set to NaN). If `rename==True`, the categories will simply be renamed
        (fewer or more items than in the old categories will result in values
        set to NaN or in unused categories, respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
        On the other hand, this method does not do checks (e.g., whether the
        old categories are included in the new categories on a reorder), which
        can result in surprising changes, for example when using special string
        dtypes on python3, which do not consider an S1 string equal to a
        single-char python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, (default: False)
            Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
                cat._codes[cat._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
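    # Illustrative sketch (values below are assumptions): adding an unused
    # category and dropping one in a single call; values of dropped
    # categories become NaN.
    #
    #   >>> c = pd.Categorical(['a', 'b', 'a'])
    #   >>> c.set_categories(['a', 'd'])
    #   [a, NaN, a]
    #   Categories (2, object): [a, d]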
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
            items as the current categories or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as a ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
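    # Illustrative sketch (assumed data): the new category is appended and
    # starts out unused.
    #
    #   >>> c = pd.Categorical(['a', 'b'])
    #   >>> c.add_categories(['c'])
    #   [a, b]
    #   Categories (3, object): [a, b, c]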
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
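    # Illustrative sketch (assumed data): categories without observations are
    # dropped while the values themselves are unchanged.
    #
    #   >>> c = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
    #   >>> c.remove_unused_categories()
    #   [a, b]
    #   Categories (2, object): [a, b]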
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
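    # Illustrative sketch (assumption): vacated positions are filled with the
    # NaN sentinel (-1 in the codes).
    #
    #   >>> pd.Categorical(['a', 'b', 'c']).shift(1)
    #   [NaN, a, b]
    #   Categories (3, object): [a, b, c]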
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
# we need to ensure __array__ get's all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype.validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
# >=15.0 < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime and period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
def argsort(self, *args, **kwargs):
# TODO(PY2): use correct signature
        # We have to do *args, **kwargs to avoid a py2-only signature
# issue since np.argsort differs from argsort.
"""Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
"""
# Keep the implementation here just for the docstring.
return super(Categorical, self).argsort(*args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, dtype=self.dtype,
fastpath=True)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
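    # Illustrative sketch (assumed data): filling with a value that is already
    # a category; anything outside the categories raises ValueError.
    #
    #   >>> c = pd.Categorical(['a', np.nan, 'b'], categories=['a', 'b'])
    #   >>> c.fillna('a')
    #   [a, a, b]
    #   Categories (2, object): [a, b]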
def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
allow_fill : bool, default None.
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values
(the default). These values are set to `fill_value`. Any other
              negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
if isna(fill_value):
# For categorical, any NA value is considered a user-facing
# NA value. Our storage NA value is -1.
fill_value = -1
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = self._constructor(codes, dtype=self.dtype, fastpath=True)
return result
take = take_nd
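# Illustrative sketch (added; not part of the upstream source): with
# allow_fill=True, a -1 in the indexer is treated as a missing value.
#
# >>> cat = pd.Categorical(['a', 'b', 'c'])
# >>> cat.take([0, -1, 2], allow_fill=True)
# [a, NaN, c]
# Categories (3, object): [a, b, c]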
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but a tuple like
# (slice(None), ...) can be passed in a 2-d case
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
# replace " < ... < " with " ... " to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
If (one or more) value is not in categories or if an assigned
`Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but a tuple like
# (slice(None), ...) can be passed in a 2-d case
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
# There is a bug in numpy, which does not accept a Series as an
# indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
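# Illustrative sketch (added; not part of the upstream source): assignment
# only accepts existing categories or NaN.
#
# >>> cat = pd.Categorical(['a', 'b', 'a'])
# >>> cat[0] = 'b'   # fine, 'b' is already a category
# >>> cat[0] = 'd'   # raises ValueError: Cannot setitem on a Categorical
# ...                # with a new category, set the categories first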
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Examples
--------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
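# Illustrative sketch (added; not part of the upstream source): missing
# values are ignored by default when computing the mode.
#
# >>> pd.Categorical(['a', 'b', 'b', np.nan]).mode()
# [b]
# Categories (2, object): [a, b]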
def unique(self):
"""
Return the ``Categorical`` whose ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order, categories
keep existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc'))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc'))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
def is_dtype_equal(self, other):
"""
Returns True if categoricals are the same dtype:
same categories and same ordered
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
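# Illustrative sketch (added; not part of the upstream source; output
# formatting abridged): one row per category, plus a NaN row when present.
#
# >>> pd.Categorical(['a', 'a', 'b', np.nan]).describe()
#             counts  freqs
# categories
# a                2   0.50
# b                1   0.25
# NaN              1   0.25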
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
def _formatting_values(self):
return self
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
@delegate_names(delegate=Categorical,
accessors=["categories", "ordered"],
typ="property")
@delegate_names(delegate=Categorical,
accessors=["rename_categories", "reorder_categories",
"add_categories", "remove_categories",
"remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"],
typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
Be aware that assigning to `categories` is an inplace operation, while all
methods return new categorical data by default (but can be called with
`inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self.index = data.index
self.name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self._parent.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, 'values', values)
categories = getattr(categories, 'values', categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
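# Illustrative sketch (added; not part of the upstream source): values not
# found in `categories` map to the sentinel -1; the exact integer dtype of
# the result depends on the number of categories.
#
# >>> _get_codes_for_values(np.array(['b', 'a', 'x']), pd.Index(['a', 'b']))
# array([ 1,  0, -1], dtype=int8)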
def _recode_for_categories(codes, old_categories, new_categories):
"""
Convert a set of codes from one set of categories to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
is_iterator(list_like)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# from ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
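# Illustrative sketch (added; not part of the upstream source): for a plain
# list the categories are the sorted unique values.
#
# >>> codes, categories = _factorize_from_iterable(['b', 'a', 'b'])
# >>> codes
# array([1, 0, 1], dtype=int8)
# >>> categories
# Index(['a', 'b'], dtype='object')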
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
# results depending whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if isinstance(other, ABCSeries):
return NotImplemented
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
# Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
# In other Series, this leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# With cat[0], for example, being ``np.int64(1)`` by the time it gets
# into this function would become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :meth:`__contains__`
and :class:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
_codes_doc = """The category codes of this categorical.
The codes are an array of integers, which are the positions of the actual
values in the categories array.
There is no setter; use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
`Categoricals` can only take on a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
# For comparisons, so that numpy uses our implementation of the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
# Ways of specifying the dtype (in order of priority)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
number of new categories is unequal to the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _ndarray_values(self):
return self.codes
@property
def _constructor(self):
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
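# Illustrative sketch (added; not part of the upstream source): casting to
# a non-categorical dtype materializes the values as a numpy array.
#
# >>> cat = pd.Categorical(['a', 'b', 'a'])
# >>> cat.astype(object)
# array(['a', 'b', 'a'], dtype=object)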
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
# Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
so do not need the (computation intensive) factorization step, which is
usually done in the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered
categorical. If not given, the resulting categorical will be
unordered.
"""
codes = np.asarray(codes) # #21767
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
try:
codes = coerce_indexer_dtype(codes, categories)
except (ValueError, TypeError):
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype.validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
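# Illustrative sketch (added; not part of the upstream source): a code of
# -1 marks a missing value.
#
# >>> pd.Categorical.from_codes([0, 1, -1, 0], categories=['a', 'b'])
# [a, b, NaN, a]
# Categories (2, object): [a, b]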
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
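# Illustrative sketch (added; not part of the upstream source): ordering is
# part of the dtype, so min/max only work after as_ordered().
#
# >>> cat = pd.Categorical(['b', 'a', 'c'], categories=['c', 'b', 'a'])
# >>> cat.min()              # raises TypeError: Categorical is not ordered ...
# >>> cat.as_ordered().min()
# 'c'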
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
set to NaN). If `rename==True`, the categories will simply be renamed
(less or more items than in old categories will result in values set to
NaN or in unused categories respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
On the other hand this method does not do checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprising changes, for example when using special string
dtypes on python3, which do not consider an S1 string equal to a
single-character python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, (default: False)
Whether or not the categorical is treated as a ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
self._codes[self._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
items as the current categories or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as a ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
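# Illustrative sketch (added; not part of the upstream source): new
# categories are appended and start out unused.
#
# >>> pd.Categorical(['a', 'b']).add_categories(['c'])
# [a, b]
# Categories (3, object): [a, b, c]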
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
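# Illustrative sketch (added; not part of the upstream source):
#
# >>> cat = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
# >>> cat.remove_unused_categories()
# [a, b]
# Categories (2, object): [a, b]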
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
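# Illustrative sketch (added; not part of the upstream source): vacated
# positions become missing values.
#
# >>> pd.Categorical(['a', 'b', 'c']).shift(1)
# [NaN, a, b]
# Categories (3, object): [a, b, c]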
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
# we need to ensure __array__ gets all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype.validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
# >=15.0 < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
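    # A minimal sketch of ``isna``/``notna``; missing values are the entries
    # whose code is -1 (indicative output):
    #
    #   >>> pd.Categorical(['a', np.nan, 'b']).isna()
    #   array([False,  True, False])
    #   >>> pd.Categorical(['a', np.nan, 'b']).notna()
    #   array([ True, False,  True])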
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
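    # A minimal sketch of ``value_counts``: every category gets an entry, even
    # those with a count of zero (indicative output):
    #
    #   >>> pd.Categorical(['a', 'a', 'c'], categories=['a', 'b', 'c']).value_counts()
    #   a    2
    #   b    0
    #   c    1
    #   dtype: int64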
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime and period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
def argsort(self, *args, **kwargs):
# TODO(PY2): use correct signature
# We have to do *args, **kwargs to avoid a a py2-only signature
# issue since np.argsort differs from argsort.
"""Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
"""
# Keep the implementation here just for the docstring.
return super(Categorical, self).argsort(*args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, dtype=self.dtype,
fastpath=True)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
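    # A minimal sketch of ``fillna``: the fill value must already be one of the
    # categories, otherwise a ValueError is raised (indicative output):
    #
    #   >>> cat = pd.Categorical(['a', np.nan, 'b'], categories=['a', 'b'])
    #   >>> cat.fillna('a')
    #   [a, a, b]
    #   Categories (2, object): [a, b]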
def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
allow_fill : bool, default None.
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values
(the default). These values are set to `fill_value`. Any other
              negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
if isna(fill_value):
# For categorical, any NA value is considered a user-facing
# NA value. Our storage NA value is -1.
fill_value = -1
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = self._constructor(codes, dtype=self.dtype, fastpath=True)
return result
take = take_nd
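    # A minimal sketch of ``take``: with ``allow_fill=True`` a -1 in the indexer
    # marks a missing value, with ``allow_fill=False`` it indexes from the right
    # (indicative output):
    #
    #   >>> cat = pd.Categorical(['a', 'b', 'c'])
    #   >>> cat.take([0, -1], allow_fill=True)
    #   [a, NaN]
    #   Categories (3, object): [a, b, c]
    #   >>> cat.take([0, -1], allow_fill=False)
    #   [a, c]
    #   Categories (3, object): [a, b, c]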
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
        # in a 2-d case be passed (slice(None), ...)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
        # replace the long " < ... < " with " ... " to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
            If (one or more) value is not in categories or if an assigned
            `Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
            # in a 2-d case be passed (slice(None), ...)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
            # There is a bug in numpy, which does not accept a Series as an
            # indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Example
-------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
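    # A minimal sketch of ``min``/``max``: only ordered Categoricals support
    # them, and the result follows the category order, not lexical order:
    #
    #   >>> cat = pd.Categorical(['a', 'b', 'c'],
    #   ...                      categories=['c', 'b', 'a'], ordered=True)
    #   >>> cat.min(), cat.max()
    #   ('c', 'a')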
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
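    # A minimal sketch of ``mode`` (indicative output):
    #
    #   >>> pd.Categorical(['a', 'b', 'b', np.nan]).mode()
    #   [b]
    #   Categories (2, object): [a, b]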
def unique(self):
"""
        Return the ``Categorical`` whose ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order, categories
          keep their existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc'))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc'))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
def is_dtype_equal(self, other):
"""
        Returns True if categoricals are of the same dtype:
        same categories and same ordered flag.
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
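    # A minimal sketch of ``equals``: both the values and the dtype (categories
    # and orderedness) have to match (indicative output):
    #
    #   >>> c = pd.Categorical(['a', 'b'])
    #   >>> c.equals(pd.Categorical(['a', 'b']))
    #   True
    #   >>> c.equals(pd.Categorical(['a', 'c'], categories=['a', 'b', 'c']))
    #   False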
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
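    # A minimal sketch of ``repeat`` (indicative output):
    #
    #   >>> pd.Categorical(['a', 'b']).repeat(2)
    #   [a, a, b, b]
    #   Categories (2, object): [a, b]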
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
def _formatting_values(self):
return self
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
@delegate_names(delegate=Categorical,
accessors=["categories", "ordered"],
typ="property")
@delegate_names(delegate=Categorical,
accessors=["rename_categories", "reorder_categories",
"add_categories", "remove_categories",
"remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"],
typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
    Be aware that assigning to `categories` is an inplace operation, while all
    methods return new categorical data by default (but can be called with
    `inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self.index = data.index
self.name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self._parent.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, 'values', values)
categories = getattr(categories, 'values', categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
def _recode_for_categories(codes, old_categories, new_categories):
"""
    Convert a set of codes from one set of categories to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
is_iterator(list_like)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# from ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
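# A minimal sketch of ``_factorize_from_iterable`` on a plain list: the
# categories come back sorted and the codes point into them (indicative output):
#
#   >>> codes, categories = _factorize_from_iterable(['b', 'a', 'b'])
#   >>> codes
#   array([1, 0, 1], dtype=int8)
#   >>> categories
#   Index(['a', 'b'], dtype='object')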
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
|
unique
|
Return the ``Categorical`` which ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order, categories
keeps existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc'))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc'))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
|
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
# results depending whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if isinstance(other, ABCSeries):
return NotImplemented
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
            # Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
                # In other Series, this leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# With cat[0], for example, being ``np.int64(1)`` by the time it gets
# into this function would become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
    This is a helper method for :meth:`__contains__`
    and :meth:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
    cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
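# A minimal sketch of ``contains``: 'b' is a category of the Categorical below
# but does not occur among its values, so it is not "contained" (indicative):
#
#   >>> cat = pd.Categorical(['a'], categories=['a', 'b'])
#   >>> contains(cat, 'a', container=cat._codes)
#   True
#   >>> contains(cat, 'b', container=cat._codes)
#   False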
_codes_doc = """The category codes of this categorical.
Level codes are an array of integers which are the positions of the real
values in the categories array.
There is no setter; use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
    `Categoricals` can take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean, (default False)
        Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
    # For comparisons, so that numpy uses our implementation of the compare
    # ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
# Ways of specifying the dtype (prioritized ordered)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
        Assigning to `categories` is an inplace operation!
Raises
------
ValueError
            If the new categories do not validate as categories or if the
            number of new categories is unequal to the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _ndarray_values(self):
return self.codes
@property
def _constructor(self):
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
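    # A minimal sketch of ``astype``: converting to a non-categorical dtype
    # materializes the values as a numpy array (indicative output):
    #
    #   >>> pd.Categorical(['a', 'b']).astype(object)
    #   array(['a', 'b'], dtype=object)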
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
            # Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
        This constructor is useful if you already have codes and categories and
        so do not need the (computationally intensive) factorization step, which
        is usually done in the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
            Whether or not this categorical is treated as an ordered
            categorical. If not given, the resulting categorical will be
unordered.
"""
codes = np.asarray(codes) # #21767
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
try:
codes = coerce_indexer_dtype(codes, categories)
except (ValueError, TypeError):
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype.validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
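    # A minimal sketch of ``from_codes``: -1 codes become missing values
    # (indicative output):
    #
    #   >>> pd.Categorical.from_codes([0, 1, -1, 0], categories=['a', 'b'])
    #   [a, b, NaN, a]
    #   Categories (2, object): [a, b]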
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
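    # A minimal sketch of ``as_ordered``: only the dtype's ordered flag changes,
    # the values and categories stay the same (indicative output):
    #
    #   >>> pd.Categorical(['a', 'b']).as_ordered()
    #   [a, b]
    #   Categories (2, object): [a < b]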
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
        set to NaN). If `rename==True`, the categories will simply be renamed
        (fewer or more items than in the old categories will result in values
        set to NaN or in unused categories respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
        On the other hand, this method does not do checks (e.g., whether the
        old categories are included in the new categories on a reorder), which
        can result in surprising changes, for example when using special string
        dtypes on python3, which do not consider an S1 string equal to a
        single-char python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, (default: False)
            Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
self._codes[self._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
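    # A minimal sketch of ``set_categories``: values whose category is dropped
    # become missing (indicative output):
    #
    #   >>> pd.Categorical(['a', 'b', 'c']).set_categories(['a', 'b'])
    #   [a, b, NaN]
    #   Categories (2, object): [a, b]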
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
            If new categories are list-like and do not have the same number of
            items as the current categories or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
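Examples
--------
A short illustrative sketch:
>>> c = pd.Categorical(['a', 'b', 'a'])
>>> c.reorder_categories(['b', 'a'], ordered=True)
[a, b, a]
Categories (2, object): [b < a]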
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
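Examples
--------
A minimal sketch with made-up values:
>>> c = pd.Categorical(['a', 'b', 'a'])
>>> c.add_categories(['c'])
[a, b, a]
Categories (3, object): [a, b, c]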
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
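Examples
--------
An illustrative sketch; values in removed categories become NaN:
>>> c = pd.Categorical(['a', 'b', 'a'])
>>> c.remove_categories(['b'])
[a, NaN, a]
Categories (1, object): [a]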
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
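Examples
--------
A small sketch with illustrative data:
>>> c = pd.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c'])
>>> c.remove_unused_categories()
[a, b, a]
Categories (2, object): [a, b]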
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
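Examples
--------
A minimal sketch; positions shifted in are filled with NaN:
>>> c = pd.Categorical(['a', 'b', 'c'])
>>> c.shift(1)
[NaN, a, b]
Categories (3, object): [a, b, c]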
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
# we need to ensure __array__ get's all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype.validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
# >= 0.15.0, < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
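Examples
--------
A short sketch with illustrative values:
>>> c = pd.Categorical(['a', np.nan, 'b'])
>>> c.dropna()
[a, b]
Categories (2, object): [a, b]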
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
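Examples
--------
An illustrative sketch; unused categories still get a count of 0:
>>> c = pd.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c'])
>>> c.value_counts()
a    2
b    1
c    0
dtype: int64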
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime and period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
def argsort(self, *args, **kwargs):
# TODO(PY2): use correct signature
# We have to do *args, **kwargs to avoid a py2-only signature
# issue since np.argsort differs from argsort.
"""Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
"""
# Keep the implementation here just for the docstring.
return super(Categorical, self).argsort(*args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, dtype=self.dtype,
fastpath=True)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
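Examples
--------
A minimal sketch; the fill value must already be a category:
>>> c = pd.Categorical(['a', np.nan, 'b'])
>>> c.fillna('a')
[a, a, b]
Categories (2, object): [a, b]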
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
allow_fill : bool, default None.
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values
(the default). These values are set to `fill_value`. Any other
negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
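Examples
--------
An illustrative sketch; with ``allow_fill=True`` a ``-1`` in the
indexer marks a missing value:
>>> c = pd.Categorical(['a', 'b', 'c'])
>>> c.take([0, 2, -1], allow_fill=True)
[a, c, NaN]
Categories (3, object): [a, b, c]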
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
if isna(fill_value):
# For categorical, any NA value is considered a user-facing
# NA value. Our storage NA value is -1.
fill_value = -1
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = self._constructor(codes, dtype=self.dtype, fastpath=True)
return result
take = take_nd
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None),....)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
# collapse " < ... < " to " ... " to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
If (one or more) value is not in categories or if an assigned
`Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None),....)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
# There is a bug in numpy, which does not accept a Series as an
# indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Example
-------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
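Examples
--------
A small sketch with illustrative values:
>>> c = pd.Categorical(['b', 'a', 'c'], ordered=True)
>>> c.min()
'a'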
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
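Examples
--------
A minimal illustrative sketch:
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.mode()
[a]
Categories (2, object): [a, b]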
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# MASKED: unique function (lines 2155-2209)
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
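Examples
--------
A short sketch with made-up values:
>>> c = pd.Categorical(['a', 'b'])
>>> c.equals(pd.Categorical(['a', 'b']))
True
>>> c.equals(pd.Categorical(['a', 'c']))
False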
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
def is_dtype_equal(self, other):
"""
Returns True if categoricals are the same dtype:
same categories and same ordered flag.
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
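Examples
--------
An illustrative sketch:
>>> c = pd.Categorical(['a', 'b'])
>>> c.repeat(2)
[a, a, b, b]
Categories (2, object): [a, b]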
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
def _formatting_values(self):
return self
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
@delegate_names(delegate=Categorical,
accessors=["categories", "ordered"],
typ="property")
@delegate_names(delegate=Categorical,
accessors=["rename_categories", "reorder_categories",
"add_categories", "remove_categories",
"remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"],
typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
Be aware that assigning to `categories` is an inplace operation, while all
methods return new categorical data by default (but can be called with
`inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self.index = data.index
self.name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self._parent.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, 'values', values)
categories = getattr(categories, 'values', categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
def _recode_for_categories(codes, old_categories, new_categories):
"""
Convert a set of codes to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
is_iterator(list_like)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# from ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
|
def unique(self):
"""
Return the ``Categorical`` whose ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order; categories
keep their existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc')).unique()
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc')).unique()
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True).unique()
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
| 2,155 | 2,209 |
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
# results depending whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if isinstance(other, ABCSeries):
return NotImplemented
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
# Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
# In other Series, this leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# With cat[0], for example, being ``np.int64(1)`` by the time it gets
# into this function would become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :method:`__contains__`
and :class:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
_codes_doc = """The category codes of this categorical.
Level codes are an array of integers which are the positions of the real
values in the categories array.
There is no setter; use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
`Categoricals` can only take on a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
# For comparisons, so that numpy uses our implementation of the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
# Ways of specifying the dtype (prioritized ordered)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
number of new categories is unequal to the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _ndarray_values(self):
return self.codes
@property
def _constructor(self):
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
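Examples
--------
A minimal sketch with illustrative values:
>>> c = pd.Categorical(['a', 'b'])
>>> c.astype(object)
array(['a', 'b'], dtype=object)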
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
            # Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
        so do not need the (computationally intensive) factorization step,
        which is usually done in the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
            Whether or not this categorical is treated as an ordered
categorical. If not given, the resulting categorical will be
unordered.
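        Examples
        --------
        A minimal illustrative sketch (the output shown is representative):
        >>> pd.Categorical.from_codes([0, 1, 0, -1], categories=['a', 'b'])
        [a, b, a, NaN]
        Categories (2, object): [a, b]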
"""
codes = np.asarray(codes) # #21767
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
try:
codes = coerce_indexer_dtype(codes, categories)
except (ValueError, TypeError):
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype.validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
            A non-writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
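        Examples
        --------
        A small illustration (representative output):
        >>> pd.Categorical(['a', 'b']).as_ordered()
        [a, b]
        Categories (2, object): [a < b]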
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
        set to NaN). If `rename==True`, the categories will simply be renamed
(less or more items than in old categories will result in values set to
NaN or in unused categories respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
        On the other hand this method does not do checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprising changes, for example when using special string
        dtypes on python3, which do not consider an S1 string equal to a
        single-character python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
        ordered : boolean, optional
            Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
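        Examples
        --------
        An illustrative sketch (representative output); values not present
        in the new categories become NaN:
        >>> c = pd.Categorical(['a', 'b', 'a'])
        >>> c.set_categories(['b', 'c'])
        [NaN, b, NaN]
        Categories (2, object): [b, c]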
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
                cat._codes[cat._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
            items as the current categories or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
        `new_categories` needs to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
            Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
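        Examples
        --------
        A minimal illustration (representative output):
        >>> c = pd.Categorical(['a', 'b', 'a'])
        >>> c.reorder_categories(['b', 'a'], ordered=True)
        [a, b, a]
        Categories (2, object): [b < a]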
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
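        Examples
        --------
        A minimal illustration (representative output); the new category is
        appended but unused:
        >>> c = pd.Categorical(['a', 'b'])
        >>> c.add_categories(['c'])
        [a, b]
        Categories (3, object): [a, b, c]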
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
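        Examples
        --------
        A minimal illustration (representative output); removed values
        become NaN:
        >>> c = pd.Categorical(['a', 'b', 'a'])
        >>> c.remove_categories(['a'])
        [NaN, b, NaN]
        Categories (1, object): [b]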
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
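        Examples
        --------
        A minimal illustration (representative output):
        >>> c = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
        >>> c.remove_unused_categories()
        [a, b]
        Categories (2, object): [a, b]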
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
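        Examples
        --------
        A small illustration (representative output); positions shifted in
        are filled with NaN:
        >>> pd.Categorical(['a', 'b', 'c']).shift(1)
        [NaN, a, b]
        Categories (3, object): [a, b, c]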
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
            # we need to ensure __array__ gets all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype.validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
# >=15.0 < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
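        Examples
        --------
        An illustrative sketch (representative output); the unused category
        'c' still gets an entry:
        >>> c = pd.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c'])
        >>> c.value_counts()
        a    2
        b    1
        c    0
        dtype: int64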
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime and period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
def argsort(self, *args, **kwargs):
# TODO(PY2): use correct signature
        # We have to do *args, **kwargs to avoid a py2-only signature
# issue since np.argsort differs from argsort.
"""Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
"""
# Keep the implementation here just for the docstring.
return super(Categorical, self).argsort(*args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, dtype=self.dtype,
fastpath=True)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
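        Examples
        --------
        A minimal illustration (representative output); the fill value must
        already be one of the categories:
        >>> c = pd.Categorical(['a', np.nan, 'b'])
        >>> c.fillna('a')
        [a, a, b]
        Categories (2, object): [a, b]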
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
allow_fill : bool, default None.
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values
(the default). These values are set to `fill_value`. Any other
            negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
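        Examples
        --------
        An illustrative sketch (representative output):
        >>> c = pd.Categorical(['a', 'b', 'a'])
        >>> c.take([0, 2, -1], allow_fill=True)
        [a, a, NaN]
        Categories (2, object): [a, b]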
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
if isna(fill_value):
# For categorical, any NA value is considered a user-facing
# NA value. Our storage NA value is -1.
fill_value = -1
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = self._constructor(codes, dtype=self.dtype, fastpath=True)
return result
take = take_nd
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
        # in a 2-d case be passed (slice(None),....)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
        # replace " < ... < " with " ... " to save space in the repr
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
            If (one or more) value is not in categories or if an assigned
`Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
            # in a 2-d case be passed (slice(None),....)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
            # There is a bug in numpy, which does not accept a Series as an
# indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Example
-------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
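        Examples
        --------
        A minimal illustration (representative output):
        >>> pd.Categorical(['a', 'b', 'c'], ordered=True).min()
        'a'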
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
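        Examples
        --------
        A minimal illustration (representative output):
        >>> pd.Categorical(['a', 'b', 'c'], ordered=True).max()
        'c'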
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
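        Examples
        --------
        An illustrative sketch (representative output); note that the full
        set of categories is kept:
        >>> pd.Categorical(['a', 'a', 'b']).mode()
        [a]
        Categories (2, object): [a, b]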
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def unique(self):
"""
Return the ``Categorical`` which ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order, categories
keeps existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc'))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc'))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
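        Examples
        --------
        A minimal illustration (representative output):
        >>> c = pd.Categorical(['a', 'b'])
        >>> c.equals(pd.Categorical(['a', 'b']))
        True
        >>> c.equals(pd.Categorical(['a', 'b'], ordered=True))
        False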
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
def is_dtype_equal(self, other):
"""
        Returns True if categoricals are the same dtype:
        same categories and same ordered attribute.
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
def _formatting_values(self):
return self
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
@delegate_names(delegate=Categorical,
accessors=["categories", "ordered"],
typ="property")
@delegate_names(delegate=Categorical,
accessors=["rename_categories", "reorder_categories",
"add_categories", "remove_categories",
"remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"],
typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
    Be aware that assigning to `categories` is an inplace operation, while all
    methods return new categorical data by default (but can be called with
`inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self.index = data.index
self.name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self._parent.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, 'values', values)
categories = getattr(categories, 'values', categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
def _recode_for_categories(codes, old_categories, new_categories):
"""
    Convert a set of codes from one set of categories to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
is_iterator(list_like)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# from ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
|
isin
|
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
|
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
# results depending whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if isinstance(other, ABCSeries):
return NotImplemented
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
            # Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
            # In other series, this leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# With cat[0], for example, being ``np.int64(1)`` by the time it gets
# into this function would become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :meth:`__contains__`
and :meth:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
_codes_doc = """The category codes of this categorical.
The category codes are an array of integers which are the positions of
the actual values in the categories array.
There is no setter; use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion.
`Categoricals` can only take on a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
# For comparisons, so that numpy uses our implementation of the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
# Ways of specifying the dtype (prioritized ordered)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
number of new categories is unequal to the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _ndarray_values(self):
return self.codes
@property
def _constructor(self):
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
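Examples
--------
A small illustrative sketch (an arbitrary cast to ``object``):
>>> pd.Categorical(['a', 'b']).astype(object)
array(['a', 'b'], dtype=object)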
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
# Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
so do not need the (computationally intensive) factorization step, which
is usually done in the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered
categorical. If not given, the resulting categorical will be
unordered.
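Examples
--------
A small illustrative sketch (codes and categories chosen arbitrarily;
-1 marks a missing value):
>>> pd.Categorical.from_codes([0, 1, 0, -1], categories=['a', 'b'])
[a, b, a, NaN]
Categories (2, object): [a, b]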
"""
codes = np.asarray(codes) # #21767
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
try:
codes = coerce_indexer_dtype(codes, categories)
except (ValueError, TypeError):
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype.validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non-writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
set to NaN). If `rename==True`, the categories will simply be renamed
(fewer or more items than in the old categories will result in values set
to NaN or in unused categories respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
On the other hand this method does not do checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprising changes, for example when using special string
dtypes on python3, which do not consider an S1 string equal to a
single-char python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
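Examples
--------
A brief sketch with arbitrary values (adding an unused category and
reordering in one step):
>>> c = pd.Categorical(['a', 'b', 'a'])
>>> c.set_categories(['b', 'a', 'c'])
[a, b, a]
Categories (3, object): [b, a, c]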
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
self._codes[self._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
items as the current categories or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list-like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` needs to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
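Examples
--------
A minimal sketch (the new order must contain exactly the old categories):
>>> c = pd.Categorical(['a', 'b', 'a'])
>>> c.reorder_categories(['b', 'a'], ordered=True)
[a, b, a]
Categories (2, object): [b < a]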
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
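Examples
--------
A short example with arbitrary values (the new category is appended and
initially unused):
>>> c = pd.Categorical(['a', 'b'])
>>> c.add_categories(['c'])
[a, b]
Categories (3, object): [a, b, c]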
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
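Examples
--------
A short example with arbitrary values (values in removed categories
become NaN):
>>> c = pd.Categorical(['a', 'b', 'a'])
>>> c.remove_categories(['b'])
[a, NaN, a]
Categories (1, object): [a]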
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
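Examples
--------
A short example with arbitrary values (only categories that actually
occur are kept):
>>> c = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
>>> c.remove_unused_categories()
[a, b]
Categories (2, object): [a, b]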
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
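Examples
--------
A small sketch with arbitrary values (vacated positions become NaN):
>>> pd.Categorical(['a', 'b', 'c']).shift(1)
[NaN, a, b]
Categories (3, object): [a, b, c]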
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype is None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
# we need to ensure __array__ gets all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype.validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
# >=15.0 < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
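Examples
--------
A minimal sketch with arbitrary values:
>>> pd.Categorical(['a', 'b', 'a']).value_counts()
a    2
b    1
dtype: int64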
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
an Index if the categories are datetime-like / period
"""
# if we are a datetime and period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
def argsort(self, *args, **kwargs):
# TODO(PY2): use correct signature
# We have to do *args, **kwargs to avoid a py2-only signature
# issue since np.argsort differs from argsort.
"""Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
"""
# Keep the implementation here just for the docstring.
return super(Categorical, self).argsort(*args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, dtype=self.dtype,
fastpath=True)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
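Examples
--------
A brief sketch with arbitrary values (the fill value must already be
a category):
>>> c = pd.Categorical(['a', np.nan, 'b'], categories=['a', 'b'])
>>> c.fillna('a')
[a, a, b]
Categories (2, object): [a, b]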
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
allow_fill : bool, default None.
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values
(the default). These values are set to `fill_value`. Any other
negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
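Examples
--------
A short sketch with arbitrary values (with ``allow_fill=True``, ``-1``
in the indexer marks a missing value):
>>> c = pd.Categorical(['a', 'b', 'c'])
>>> c.take([0, -1, 1], allow_fill=True)
[a, NaN, b]
Categories (3, object): [a, b, c]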
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
if isna(fill_value):
# For categorical, any NA value is considered a user-facing
# NA value. Our storage NA value is -1.
fill_value = -1
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = self._constructor(codes, dtype=self.dtype, fastpath=True)
return result
take = take_nd
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None), ...)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
# replace " < ... < " with " ... " to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
If (one or more) value is not in categories or if an assigned
`Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None), ...)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
# There is a bug in numpy, which does not accept a Series as an
# indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Examples
--------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
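Examples
--------
A minimal example (requires an ordered categorical):
>>> pd.Categorical(['a', 'b'], ordered=True).min()
'a'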
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
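Examples
--------
A minimal sketch with arbitrary values:
>>> pd.Categorical(['a', 'b', 'b', 'c']).mode()
[b]
Categories (3, object): [a, b, c]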
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def unique(self):
"""
Return the ``Categorical`` whose ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order, categories
keeps existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc')).unique()
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc')).unique()
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True).unique()
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
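Examples
--------
A quick illustration with arbitrary values:
>>> c = pd.Categorical(['a', 'b'])
>>> c.equals(pd.Categorical(['a', 'b']))
True
>>> c.equals(pd.Categorical(['a', 'c']))
False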
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
def is_dtype_equal(self, other):
"""
Returns True if categoricals are the same dtype:
same categories and same ordered
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
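Examples
--------
A small sketch with arbitrary values:
>>> pd.Categorical(['a', 'b']).repeat(2)
[a, a, b, b]
Categories (2, object): [a, b]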
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
def _formatting_values(self):
return self
# MASKED: isin function (lines 2308-2359)
# The Series.cat accessor
@delegate_names(delegate=Categorical,
accessors=["categories", "ordered"],
typ="property")
@delegate_names(delegate=Categorical,
accessors=["rename_categories", "reorder_categories",
"add_categories", "remove_categories",
"remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"],
typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
Be aware that assigning to `categories` is an inplace operation, while all
methods return new categorical data by default (but can be called with
`inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self.index = data.index
self.name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self._parent.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, 'values', values)
categories = getattr(categories, 'values', categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
def _recode_for_categories(codes, old_categories, new_categories):
"""
Convert a set of codes from one set of categories to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
is_iterator(list_like)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
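Examples
--------
A rough sketch of how this private helper is meant to be used (input
chosen arbitrarily):
>>> codes, categories = _factorize_from_iterable(['b', 'a', 'b'])
>>> codes.tolist()
[1, 0, 1]
>>> list(categories)
['a', 'b']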
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# from ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
|
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
| 2,308 | 2,359 |
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
# results depending whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if isinstance(other, ABCSeries):
return NotImplemented
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
# Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
# In other Series, this leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# With cat[0], for example, being ``np.int64(1)`` by the time it gets
# into this function would become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :method:`__contains__`
and :class:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
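Examples
--------
An illustrative sketch using this module-level helper directly:
>>> cat = pd.Categorical(['a'], categories=['a', 'b'])
>>> contains(cat, 'a', container=cat._codes)
True
>>> contains(cat, 'b', container=cat._codes)
False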
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
_codes_doc = """The category codes of this categorical.
Level codes are an array of integers which are the positions of the real
values in the categories array.
There is no setter; use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
`Categoricals` can only take on a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
# For comparisons, so that numpy uses our implementation of the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
# Ways of specifying the dtype (prioritized ordered)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
number of new categories is unequal to the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
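Examples
--------
For illustration (values chosen arbitrarily), assigning new
categories relabels each existing category in place:
>>> c = pd.Categorical(['a', 'b', 'a'])
>>> c.categories = ['x', 'y']
>>> c
[x, y, x]
Categories (2, object): [x, y]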
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _ndarray_values(self):
return self.codes
@property
def _constructor(self):
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
# Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
so do not need the (computation intensive) factorization step, which is
usually done in the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered
categorical. If not given, the resulting categorical will be
unordered.
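Examples
--------
A minimal sketch (codes and categories chosen arbitrarily); -1
marks a missing value:
>>> pd.Categorical.from_codes([0, 1, -1, 1], categories=['a', 'b'])
[a, b, NaN, b]
Categories (2, object): [a, b]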
"""
codes = np.asarray(codes) # #21767
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
try:
codes = coerce_indexer_dtype(codes, categories)
except (ValueError, TypeError):
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype.validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
set to NaN). If `rename==True`, the categories will simply be renamed
(fewer or more items than in the old categories will result in values set to
NaN or in unused categories respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
On the other hand this method does not do checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprising changes, for example when using special string
dtypes on python3, which does not consider an S1 string equal to a
single char python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
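Examples
--------
An illustrative sketch (values chosen arbitrarily); values outside
the new categories become NaN:
>>> c = pd.Categorical(['a', 'b', 'c'])
>>> c.set_categories(['a', 'b', 'd'])
[a, b, NaN]
Categories (3, object): [a, b, d]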
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
self._codes[self._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
items as the current categories or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list-like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
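Examples
--------
For example (categories chosen arbitrarily); the added category is
unused until values are assigned to it:
>>> c = pd.Categorical(['a', 'b'])
>>> c.add_categories(['c'])
[a, b]
Categories (3, object): [a, b, c]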
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
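Examples
--------
A small illustration (values chosen arbitrarily); values in removed
categories become NaN:
>>> c = pd.Categorical(['a', 'b', 'c'])
>>> c.remove_categories(['c'])
[a, b, NaN]
Categories (2, object): [a, b]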
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
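Examples
--------
An illustrative sketch (categories chosen arbitrarily):
>>> c = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
>>> c.remove_unused_categories()
[a, b]
Categories (2, object): [a, b]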
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
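Examples
--------
For illustration (values chosen arbitrarily); vacated positions are
filled with NaN:
>>> pd.Categorical(['a', 'b', 'c']).shift(1)
[NaN, a, b]
Categories (3, object): [a, b, c]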
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
# we need to ensure __array__ gets all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype.validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
# >= 0.15.0 < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime and period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
def argsort(self, *args, **kwargs):
# TODO(PY2): use correct signature
# We have to do *args, **kwargs to avoid a py2-only signature
# issue since np.argsort differs from argsort.
"""Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
"""
# Keep the implementation here just for the docstring.
return super(Categorical, self).argsort(*args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, dtype=self.dtype,
fastpath=True)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
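Examples
--------
A minimal sketch (values chosen arbitrarily); the fill value must
already be one of the categories:
>>> c = pd.Categorical(['a', np.nan, 'b'], categories=['a', 'b'])
>>> c.fillna('a')
[a, a, b]
Categories (2, object): [a, b]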
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
allow_fill : bool, default None.
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values
(the default). These values are set to `fill_value`. Any other
negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
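Examples
--------
An illustrative sketch with an explicit ``allow_fill`` (values
chosen arbitrarily):
>>> c = pd.Categorical(['a', 'b', 'c'])
>>> c.take([0, -1, 1], allow_fill=True)
[a, NaN, b]
Categories (3, object): [a, b, c]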
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
if isna(fill_value):
# For categorical, any NA value is considered a user-facing
# NA value. Our storage NA value is -1.
fill_value = -1
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = self._constructor(codes, dtype=self.dtype, fastpath=True)
return result
take = take_nd
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None),....)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
        # replace " < ... < " with " ... " to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
            If (one or more) value(s) are not in categories or if an assigned
            `Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
            # in a 2-d case be passed (slice(None), ...)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
            # There is a bug in numpy, which does not accept a Series as an
# indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
        Examples
        --------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def unique(self):
"""
        Return the ``Categorical`` whose ``categories`` and ``codes`` are
        unique. Unused categories are NOT returned.
        - unordered category: values and categories are sorted by appearance
          order.
        - ordered category: values are sorted by appearance order, categories
          keep their existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
        >>> pd.Categorical(list('baabc')).unique()
[b, a, c]
Categories (3, object): [b, a, c]
        >>> pd.Categorical(list('baabc'), categories=list('abc')).unique()
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
        >>> pd.Categorical(list('baabc'),
        ...                categories=list('abc'),
        ...                ordered=True).unique()
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
def is_dtype_equal(self, other):
"""
        Returns True if categoricals are the same dtype:
        same categories and same ordered flag.
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
def _formatting_values(self):
return self
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
@delegate_names(delegate=Categorical,
accessors=["categories", "ordered"],
typ="property")
@delegate_names(delegate=Categorical,
accessors=["rename_categories", "reorder_categories",
"add_categories", "remove_categories",
"remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"],
typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
    Be aware that assigning to `categories` is an inplace operation, while all
    methods return new categorical data by default (but can be called with
    `inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self.index = data.index
self.name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self._parent.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, 'values', values)
categories = getattr(categories, 'values', categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
def _recode_for_categories(codes, old_categories, new_categories):
"""
    Convert a set of codes to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
is_iterator(list_like)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# from ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
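# A hedged usage sketch of the two factorize helpers above (illustrative only;
# the inputs and printed results below are examples, not upstream code):
if __name__ == '__main__':
    codes, cats = _factorize_from_iterable(list('baabc'))
    print(codes)       # [1 0 0 1 2] -- integer codes into `cats`
    print(cats)        # Index(['a', 'b', 'c'], dtype='object')
    codes_list, cats_list = _factorize_from_iterables([list('ab'), [1, 2, 1]])
    print(codes_list)  # one ndarray of codes per input iterable
    print(cats_list)   # one Index per input iterable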
|
submit
|
Submits a transaction to Pay.ir.
When called, the method submits the necessary information about the transaction to Pay.ir and returns a
HttpResponseRedirect object that can redirect the user to the gateway, if nothing goes wrong. In case of an
error, a GatewayError is raised, containing the error_code and error_message reported by Pay.ir.
:param request: The WSGIRequest object passed to the view.
:param transaction: A transaction object (or a similar class) that's already been saved to the database.
:param mobile: (Optional) Phone number of the payer. If provided, payer's saved card numbers will be listed for them in the gateway.
:param valid_card_number: (Optional) Specifies a single card number as the only one that can complete the transaction.
:param callback: (Optional) Overrides the default callback of the gateway.
|
from django.db import models
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from django.shortcuts import redirect
from django.urls import reverse
from django.utils import timezone
import requests
from . import exceptions
class Gateway(models.Model):
label = models.CharField(max_length=255, verbose_name=_('Label'))
api_key = models.CharField(max_length=255, verbose_name=_('API Key'))
default_callback = models.CharField(max_length=255, null=True, blank=True, verbose_name=_('Redirect to'), help_text=_('Enter the path name for a view that will verify the transaction.'))
class Meta:
verbose_name = _('Gateway')
verbose_name_plural = _('Gateways')
submission_url = 'https://pay.ir/pg/send'
verification_url = 'https://pay.ir/pg/verify'
def _prepare_submission_payload(self, request, transaction, mobile, valid_card_number, callback):
if callback is None:
raise ValueError('You need to specify a path name as the callback for your transactions.')
return {
'api': self.api_key,
'amount': transaction.amount,
'redirect': request.build_absolute_uri(reverse(callback)),
'mobile': mobile,
'factorNumber': transaction.id,
'description': transaction.description,
'validCardNumber': valid_card_number
}
# MASKED: submit function (lines 37-57)
def create_and_submit(self, request, account, amount: int, mobile: str = None, valid_card_number: str = None, callback: str = None):
"""Creates a transaction object and submits the transaction to Pay.ir.
When called, the method submits the necessary information about the transaction to Pay.ir and returns a
HttpResponseRedirect object that can redirect the user to the gateway, if nothing goes wrong. In case of an
error, a GatewayError is raised, containing the error_code and error_message reported by Pay.ir.
:param request: The WSGIRequest object passed to the view.
:param account: Payer's account object. The account will be assigned to the transaction through a ForeignKey.
:param amount: The amount of the transaction in IRR. The amount has to be more than 1000.
:param mobile: (Optional) Phone number of the payer. If provided, payer's saved card numbers will be listed for them in the gateway.
:param valid_card_number: (Optional) Specifies a single card number as the only one that can complete the transaction.
:param callback: (Optional) Overrides the default callback of the gateway.
"""
transaction = Transaction(account=account, amount=amount)
transaction.save()
return self.submit(request, transaction, mobile, valid_card_number, callback)
def verify(self, transaction):
"""Verifies the transaction with Pay.ir.
When a transaction returns with status '1', it must be verified with Pay.ir. Otherwise, it will be returned to
the payer's bank account in 30 minutes. The method returns the updated transaction object and a boolean value.
The boolean value would be True if the `verified` flag of the transaction was switched to True. If the
`verified` attribute of transaction object and the returned boolean value do not match, the user might be trying
to confirm a payment for a second time.
:param transaction: The transaction object corresponding to the specified token in request.GET.
"""
payload = {'api': self.api_key, 'token': transaction.token}
response = requests.post(self.verification_url, data=payload)
data = response.json()
if response:
if not transaction.verified:
transaction.gateway = self
transaction.verified = True
transaction.verified_at = timezone.now()
transaction.save()
return transaction, True
else:
return transaction, False
raise exceptions.GatewayError(error_code=data['errorCode'], error_message=data['errorMessage'])
def find_and_verify(self, token: str):
"""Finds a transaction with a matching token value and verifies it with Pay.ir.
When a transaction returns with status '1', it must be verified with Pay.ir. Otherwise, it will be returned to
the payer's bank account in 30 minutes. The method returns the updated transaction object and a boolean value.
The boolean value would be True if the `verified` flag of the transaction was switched to True. If the
`verified` attribute of transaction object and the returned boolean value do not match, the user might be trying
to confirm a payment for a second time.
:param token: The token of the transaction, which can be found in request.GET. The method will look for a
transaction object with the same token and return it as the first argument.
"""
transaction = Transaction.objects.get(token=token)
return self.verify(transaction)
def __str__(self):
return self.label
class Transaction(models.Model):
account = models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE, verbose_name=_('Account'))
created = models.DateTimeField(auto_now_add=True, auto_now=False, verbose_name=_('Created'))
modified = models.DateTimeField(auto_now=True, verbose_name=_('Modified'))
amount = models.IntegerField(verbose_name=_('Amount (IRR)'))
description = models.CharField(max_length=255, null=True, blank=True, verbose_name=_('Description'))
gateway = models.ForeignKey(to=Gateway, on_delete=models.SET_NULL, null=True, blank=True, verbose_name=_('Gateway'))
token = models.TextField(null=True, blank=True, unique=True, verbose_name=_('Token'))
verified = models.BooleanField(default=False, verbose_name=_('Verified'))
verified_at = models.DateTimeField(null=True, blank=True, verbose_name=_('Verified At'))
class Meta:
ordering = ['-modified']
verbose_name = _('Transaction')
verbose_name_plural = _('Transactions')
def __str__(self):
return _('Transaction %(id)d') % {'id': self.id}
|
def submit(self, request, transaction, mobile: str = None, valid_card_number: str = None, callback: str = None):
"""Submits a transaction to Pay.ir.
When called, the method submits the necessary information about the transaction to Pay.ir and returns a
HttpResponseRedirect object that can redirect the user to the gateway, if nothing goes wrong. In case of an
error, a GatewayError is raised, containing the error_code and error_message reported by Pay.ir.
:param request: The WSGIRequest object passed to the view.
:param transaction: A transaction object (or a similar class) that's already been saved to the database.
:param mobile: (Optional) Phone number of the payer. If provided, payer's saved card numbers will be listed for them in the gateway.
:param valid_card_number: (Optional) Specifies a single card number as the only one that can complete the transaction.
:param callback: (Optional) Overrides the default callback of the gateway.
"""
payload = self._prepare_submission_payload(request, transaction, mobile, valid_card_number, callback or self.default_callback)
response = requests.post(self.submission_url, data=payload)
data = response.json()
if response:
transaction.token = data['token']
transaction.save()
return redirect(f'https://pay.ir/pg/{transaction.token}')
raise exceptions.GatewayError(error_code=data['errorCode'], error_message=data['errorMessage'])
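# Usage sketch (illustrative, not part of this module): a Django view that starts
# a payment with Gateway.submit. The gateway lookup, the amount and the
# 'payments:verify' path name are assumptions made for this example.
from django.contrib.auth.decorators import login_required

@login_required
def start_payment(request):
    gateway = Gateway.objects.first()
    transaction = Transaction.objects.create(account=request.user, amount=10000)
    # submit() returns an HttpResponseRedirect to https://pay.ir/pg/<token>
    return gateway.submit(request, transaction, callback='payments:verify')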
| 37 | 57 |
from django.db import models
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from django.shortcuts import redirect
from django.urls import reverse
from django.utils import timezone
import requests
from . import exceptions
class Gateway(models.Model):
label = models.CharField(max_length=255, verbose_name=_('Label'))
api_key = models.CharField(max_length=255, verbose_name=_('API Key'))
default_callback = models.CharField(max_length=255, null=True, blank=True, verbose_name=_('Redirect to'), help_text=_('Enter the path name for a view that will verify the transaction.'))
class Meta:
verbose_name = _('Gateway')
verbose_name_plural = _('Gateways')
submission_url = 'https://pay.ir/pg/send'
verification_url = 'https://pay.ir/pg/verify'
def _prepare_submission_payload(self, request, transaction, mobile, valid_card_number, callback):
if callback is None:
raise ValueError('You need to specify a path name as the callback for your transactions.')
return {
'api': self.api_key,
'amount': transaction.amount,
'redirect': request.build_absolute_uri(reverse(callback)),
'mobile': mobile,
'factorNumber': transaction.id,
'description': transaction.description,
'validCardNumber': valid_card_number
}
def submit(self, request, transaction, mobile: str = None, valid_card_number: str = None, callback: str = None):
"""Submits a transaction to Pay.ir.
When called, the method submits the necessary information about the transaction to Pay.ir and returns a
HttpResponseRedirect object that can redirect the user to the gateway, if nothing goes wrong. In case of an
error, a GatewayError is raised, containing the error_code and error_message reported by Pay.ir.
:param request: The WSGIRequest object passed to the view.
:param transaction: A transaction object (or a similar class) that's already been saved to the database.
:param mobile: (Optional) Phone number of the payer. If provided, payer's saved card numbers will be listed for them in the gateway.
:param valid_card_number: (Optional) Specifies a single card number as the only one that can complete the transaction.
:param callback: (Optional) Overrides the default callback of the gateway.
"""
payload = self._prepare_submission_payload(request, transaction, mobile, valid_card_number, callback or self.default_callback)
response = requests.post(self.submission_url, data=payload)
data = response.json()
if response:
transaction.token = data['token']
transaction.save()
return redirect(f'https://pay.ir/pg/{transaction.token}')
raise exceptions.GatewayError(error_code=data['errorCode'], error_message=data['errorMessage'])
def create_and_submit(self, request, account, amount: int, mobile: str = None, valid_card_number: str = None, callback: str = None):
"""Creates a transaction object and submits the transaction to Pay.ir.
When called, the method submits the necessary information about the transaction to Pay.ir and returns a
HttpResponseRedirect object that can redirect the user to the gateway, if nothing goes wrong. In case of an
error, a GatewayError is raised, containing the error_code and error_message reported by Pay.ir.
:param request: The WSGIRequest object passed to the view.
:param account: Payer's account object. The account will be assigned to the transaction through a ForeignKey.
:param amount: The amount of the transaction in IRR. The amount has to be more than 1000.
:param mobile: (Optional) Phone number of the payer. If provided, payer's saved card numbers will be listed for them in the gateway.
:param valid_card_number: (Optional) Specifies a single card number as the only one that can complete the transaction.
:param callback: (Optional) Overrides the default callback of the gateway.
"""
transaction = Transaction(account=account, amount=amount)
transaction.save()
return self.submit(request, transaction, mobile, valid_card_number, callback)
def verify(self, transaction):
"""Verifies the transaction with Pay.ir.
When a transaction returns with status '1', it must be verified with Pay.ir. Otherwise, it will be returned to
the payer's bank account in 30 minutes. The method returns the updated transaction object and a boolean value.
The boolean value would be True if the `verified` flag of the transaction was switched to True. If the
`verified` attribute of transaction object and the returned boolean value do not match, the user might be trying
to confirm a payment for a second time.
:param transaction: The transaction object corresponding to the specified token in request.GET.
"""
payload = {'api': self.api_key, 'token': transaction.token}
response = requests.post(self.verification_url, data=payload)
data = response.json()
if response:
if not transaction.verified:
transaction.gateway = self
transaction.verified = True
transaction.verified_at = timezone.now()
transaction.save()
return transaction, True
else:
return transaction, False
raise exceptions.GatewayError(error_code=data['errorCode'], error_message=data['errorMessage'])
def find_and_verify(self, token: str):
"""Finds a transaction with a matching token value and verifies it with Pay.ir.
When a transaction returns with status '1', it must be verified with Pay.ir. Otherwise, it will be returned to
the payer's bank account in 30 minutes. The method returns the updated transaction object and a boolean value.
The boolean value would be True if the `verified` flag of the transaction was switched to True. If the
`verified` attribute of transaction object and the returned boolean value do not match, the user might be trying
to confirm a payment for a second time.
:param token: The token of the transaction, which can be found in request.GET. The method will look for a
transaction object with the same token and return it as the first argument.
"""
transaction = Transaction.objects.get(token=token)
return self.verify(transaction)
def __str__(self):
return self.label
class Transaction(models.Model):
account = models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE, verbose_name=_('Account'))
created = models.DateTimeField(auto_now_add=True, auto_now=False, verbose_name=_('Created'))
modified = models.DateTimeField(auto_now=True, verbose_name=_('Modified'))
amount = models.IntegerField(verbose_name=_('Amount (IRR)'))
description = models.CharField(max_length=255, null=True, blank=True, verbose_name=_('Description'))
gateway = models.ForeignKey(to=Gateway, on_delete=models.SET_NULL, null=True, blank=True, verbose_name=_('Gateway'))
token = models.TextField(null=True, blank=True, unique=True, verbose_name=_('Token'))
verified = models.BooleanField(default=False, verbose_name=_('Verified'))
verified_at = models.DateTimeField(null=True, blank=True, verbose_name=_('Verified At'))
class Meta:
ordering = ['-modified']
verbose_name = _('Transaction')
verbose_name_plural = _('Transactions')
def __str__(self):
return _('Transaction %(id)d') % {'id': self.id}
|
create_and_submit
|
Creates a transaction object and submits the transaction to Pay.ir.
When called, the method submits the necessary information about the transaction to Pay.ir and returns a
HttpResponseRedirect object that can redirect the user to the gateway, if nothing goes wrong. In case of an
error, a GatewayError is raised, containing the error_code and error_message reported by Pay.ir.
:param request: The WSGIRequest object passed to the view.
:param account: Payer's account object. The account will be assigned to the transaction through a ForeignKey.
:param amount: The amount of the transaction in IRR. The amount has to be more than 1000.
:param mobile: (Optional) Phone number of the payer. If provided, payer's saved card numbers will be listed for them in the gateway.
:param valid_card_number: (Optional) Specifies a single card number as the only one that can complete the transaction.
:param callback: (Optional) Overrides the default callback of the gateway.
|
from django.db import models
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from django.shortcuts import redirect
from django.urls import reverse
from django.utils import timezone
import requests
from . import exceptions
class Gateway(models.Model):
label = models.CharField(max_length=255, verbose_name=_('Label'))
api_key = models.CharField(max_length=255, verbose_name=_('API Key'))
default_callback = models.CharField(max_length=255, null=True, blank=True, verbose_name=_('Redirect to'), help_text=_('Enter the path name for a view that will verify the transaction.'))
class Meta:
verbose_name = _('Gateway')
verbose_name_plural = _('Gateways')
submission_url = 'https://pay.ir/pg/send'
verification_url = 'https://pay.ir/pg/verify'
def _prepare_submission_payload(self, request, transaction, mobile, valid_card_number, callback):
if callback is None:
raise ValueError('You need to specify a path name as the callback for your transactions.')
return {
'api': self.api_key,
'amount': transaction.amount,
'redirect': request.build_absolute_uri(reverse(callback)),
'mobile': mobile,
'factorNumber': transaction.id,
'description': transaction.description,
'validCardNumber': valid_card_number
}
def submit(self, request, transaction, mobile: str = None, valid_card_number: str = None, callback: str = None):
"""Submits a transaction to Pay.ir.
When called, the method submits the necessary information about the transaction to Pay.ir and returns a
HttpResponseRedirect object that can redirect the user to the gateway, if nothing goes wrong. In case of an
error, a GatewayError is raised, containing the error_code and error_message reported by Pay.ir.
:param request: The WSGIRequest object passed to the view.
:param transaction: A transaction object (or a similar class) that's already been saved to the database.
:param mobile: (Optional) Phone number of the payer. If provided, payer's saved card numbers will be listed for them in the gateway.
:param valid_card_number: (Optional) Specifies a single card number as the only one that can complete the transaction.
:param callback: (Optional) Overrides the default callback of the gateway.
"""
payload = self._prepare_submission_payload(request, transaction, mobile, valid_card_number, callback or self.default_callback)
response = requests.post(self.submission_url, data=payload)
data = response.json()
if response:
transaction.token = data['token']
transaction.save()
return redirect(f'https://pay.ir/pg/{transaction.token}')
raise exceptions.GatewayError(error_code=data['errorCode'], error_message=data['errorMessage'])
# MASKED: create_and_submit function (lines 59-75)
def verify(self, transaction):
"""Verifies the transaction with Pay.ir.
When a transaction returns with status '1', it must be verified with Pay.ir. Otherwise, it will be returned to
the payer's bank account in 30 minutes. The method returns the updated transaction object and a boolean value.
The boolean value would be True if the `verified` flag of the transaction was switched to True. If the
`verified` attribute of transaction object and the returned boolean value do not match, the user might be trying
to confirm a payment for a second time.
:param transaction: The transaction object corresponding to the specified token in request.GET.
"""
payload = {'api': self.api_key, 'token': transaction.token}
response = requests.post(self.verification_url, data=payload)
data = response.json()
if response:
if not transaction.verified:
transaction.gateway = self
transaction.verified = True
transaction.verified_at = timezone.now()
transaction.save()
return transaction, True
else:
return transaction, False
raise exceptions.GatewayError(error_code=data['errorCode'], error_message=data['errorMessage'])
def find_and_verify(self, token: str):
"""Finds a transaction with a matching token value and verifies it with Pay.ir.
When a transaction returns with status '1', it must be verified with Pay.ir. Otherwise, it will be returned to
the payer's bank account in 30 minutes. The method returns the updated transaction object and a boolean value.
The boolean value would be True if the `verified` flag of the transaction was switched to True. If the
`verified` attribute of transaction object and the returned boolean value do not match, the user might be trying
to confirm a payment for a second time.
:param token: The token of the transaction, which can be found in request.GET. The method will look for a
transaction object with the same token and return it as the first argument.
"""
transaction = Transaction.objects.get(token=token)
return self.verify(transaction)
def __str__(self):
return self.label
class Transaction(models.Model):
account = models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE, verbose_name=_('Account'))
created = models.DateTimeField(auto_now_add=True, auto_now=False, verbose_name=_('Created'))
modified = models.DateTimeField(auto_now=True, verbose_name=_('Modified'))
amount = models.IntegerField(verbose_name=_('Amount (IRR)'))
description = models.CharField(max_length=255, null=True, blank=True, verbose_name=_('Description'))
gateway = models.ForeignKey(to=Gateway, on_delete=models.SET_NULL, null=True, blank=True, verbose_name=_('Gateway'))
token = models.TextField(null=True, blank=True, unique=True, verbose_name=_('Token'))
verified = models.BooleanField(default=False, verbose_name=_('Verified'))
verified_at = models.DateTimeField(null=True, blank=True, verbose_name=_('Verified At'))
class Meta:
ordering = ['-modified']
verbose_name = _('Transaction')
verbose_name_plural = _('Transactions')
def __str__(self):
return _('Transaction %(id)d') % {'id': self.id}
|
def create_and_submit(self, request, account, amount: int, mobile: str = None, valid_card_number: str = None, callback: str = None):
"""Creates a transaction object and submits the transaction to Pay.ir.
When called, the method submits the necessary information about the transaction to Pay.ir and returns a
HttpResponseRedirect object that can redirect the user to the gateway, if nothing goes wrong. In case of an
error, a GatewayError is raised, containing the error_code and error_message reported by Pay.ir.
:param request: The WSGIRequest object passed to the view.
:param account: Payer's account object. The account will be assigned to the transaction through a ForeignKey.
:param amount: The amount of the transaction in IRR. The amount has to be more than 1000.
:param mobile: (Optional) Phone number of the payer. If provided, payer's saved card numbers will be listed for them in the gateway.
:param valid_card_number: (Optional) Specifies a single card number as the only one that can complete the transaction.
:param callback: (Optional) Overrides the default callback of the gateway.
"""
transaction = Transaction(account=account, amount=amount)
transaction.save()
return self.submit(request, transaction, mobile, valid_card_number, callback)
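# Callback sketch (illustrative, not part of this module): Pay.ir redirects the
# payer back with `token` and `status` in request.GET; a status of '1' means the
# payment can be verified. The view name and messages are assumptions for the example.
from django.http import HttpResponse

def verify_payment(request):
    gateway = Gateway.objects.first()
    if request.GET.get('status') == '1':
        transaction, switched = gateway.find_and_verify(request.GET['token'])
        if switched:
            return HttpResponse('Payment verified.')
        return HttpResponse('This payment has already been verified.')
    return HttpResponse('Payment failed or was cancelled.', status=400)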
| 59 | 75 |
from django.db import models
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from django.shortcuts import redirect
from django.urls import reverse
from django.utils import timezone
import requests
from . import exceptions
class Gateway(models.Model):
label = models.CharField(max_length=255, verbose_name=_('Label'))
api_key = models.CharField(max_length=255, verbose_name=_('API Key'))
default_callback = models.CharField(max_length=255, null=True, blank=True, verbose_name=_('Redirect to'), help_text=_('Enter the path name for a view that will verify the transaction.'))
class Meta:
verbose_name = _('Gateway')
verbose_name_plural = _('Gateways')
submission_url = 'https://pay.ir/pg/send'
verification_url = 'https://pay.ir/pg/verify'
def _prepare_submission_payload(self, request, transaction, mobile, valid_card_number, callback):
if callback is None:
raise ValueError('You need to specify a path name as the callback for your transactions.')
return {
'api': self.api_key,
'amount': transaction.amount,
'redirect': request.build_absolute_uri(reverse(callback)),
'mobile': mobile,
'factorNumber': transaction.id,
'description': transaction.description,
'validCardNumber': valid_card_number
}
def submit(self, request, transaction, mobile: str = None, valid_card_number: str = None, callback: str = None):
"""Submits a transaction to Pay.ir.
When called, the method submits the necessary information about the transaction to Pay.ir and returns a
HttpResponseRedirect object that can redirect the user to the gateway, if nothing goes wrong. In case of an
error, a GatewayError is raised, containing the error_code and error_message reported by Pay.ir.
:param request: The WSGIRequest object passed to the view.
:param transaction: A transaction object (or a similar class) that's already been saved to the database.
:param mobile: (Optional) Phone number of the payer. If provided, payer's saved card numbers will be listed for them in the gateway.
:param valid_card_number: (Optional) Specifies a single card number as the only one that can complete the transaction.
:param callback: (Optional) Overrides the default callback of the gateway.
"""
payload = self._prepare_submission_payload(request, transaction, mobile, valid_card_number, callback or self.default_callback)
response = requests.post(self.submission_url, data=payload)
data = response.json()
if response:
transaction.token = data['token']
transaction.save()
return redirect(f'https://pay.ir/pg/{transaction.token}')
raise exceptions.GatewayError(error_code=data['errorCode'], error_message=data['errorMessage'])
def create_and_submit(self, request, account, amount: int, mobile: str = None, valid_card_number: str = None, callback: str = None):
"""Creates a transaction object and submits the transaction to Pay.ir.
When called, the method submits the necessary information about the transaction to Pay.ir and returns a
HttpResponseRedirect object that can redirect the user to the gateway, if nothing goes wrong. In case of an
error, a GatewayError is raised, containing the error_code and error_message reported by Pay.ir.
:param request: The WSGIRequest object passed to the view.
:param account: Payer's account object. The account will be assigned to the transaction through a ForeignKey.
:param amount: The amount of the transaction in IRR. The amount has to be more than 1000.
:param mobile: (Optional) Phone number of the payer. If provided, payer's saved card numbers will be listed for them in the gateway.
:param valid_card_number: (Optional) Specifies a single card number as the only one that can complete the transaction.
:param callback: (Optional) Overrides the default callback of the gateway.
"""
transaction = Transaction(account=account, amount=amount)
transaction.save()
return self.submit(request, transaction, mobile, valid_card_number, callback)
def verify(self, transaction):
"""Verifies the transaction with Pay.ir.
When a transaction returns with status '1', it must be verified with Pay.ir. Otherwise, it will be returned to
the payer's bank account in 30 minutes. The method returns the updated transaction object and a boolean value.
The boolean value would be True if the `verified` flag of the transaction was switched to True. If the
`verified` attribute of transaction object and the returned boolean value do not match, the user might be trying
to confirm a payment for a second time.
:param transaction: The transaction object corresponding to the specified token in request.GET.
"""
payload = {'api': self.api_key, 'token': transaction.token}
response = requests.post(self.verification_url, data=payload)
data = response.json()
if response:
if not transaction.verified:
transaction.gateway = self
transaction.verified = True
transaction.verified_at = timezone.now()
transaction.save()
return transaction, True
else:
return transaction, False
raise exceptions.GatewayError(error_code=data['errorCode'], error_message=data['errorMessage'])
def find_and_verify(self, token: str):
"""Finds a transaction with a matching token value and verifies it with Pay.ir.
When a transaction returns with status '1', it must be verified with Pay.ir. Otherwise, it will be returned to
the payer's bank account in 30 minutes. The method returns the updated transaction object and a boolean value.
The boolean value would be True if the `verified` flag of the transaction was switched to True. If the
`verified` attribute of transaction object and the returned boolean value do not match, the user might be trying
to confirm a payment for a second time.
:param token: The token of the transaction, which can be found in request.GET. The method will look for a
transaction object with the same token and return it as the first argument.
"""
transaction = Transaction.objects.get(token=token)
return self.verify(transaction)
def __str__(self):
return self.label
class Transaction(models.Model):
account = models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE, verbose_name=_('Account'))
created = models.DateTimeField(auto_now_add=True, auto_now=False, verbose_name=_('Created'))
modified = models.DateTimeField(auto_now=True, verbose_name=_('Modified'))
amount = models.IntegerField(verbose_name=_('Amount (IRR)'))
description = models.CharField(max_length=255, null=True, blank=True, verbose_name=_('Description'))
gateway = models.ForeignKey(to=Gateway, on_delete=models.SET_NULL, null=True, blank=True, verbose_name=_('Gateway'))
token = models.TextField(null=True, blank=True, unique=True, verbose_name=_('Token'))
verified = models.BooleanField(default=False, verbose_name=_('Verified'))
verified_at = models.DateTimeField(null=True, blank=True, verbose_name=_('Verified At'))
class Meta:
ordering = ['-modified']
verbose_name = _('Transaction')
verbose_name_plural = _('Transactions')
def __str__(self):
return _('Transaction %(id)d') % {'id': self.id}
|
__call__
|
params:
s: [B*T, x]
visual_s: [B*T, y]
cell_state: Tuple([B, z],)
return:
feat: [B, a]
cell_state: Tuple([B, z],)
|
import numpy as np
import tensorflow as tf
from copy import deepcopy
from abc import ABC, abstractmethod
from tensorflow.keras import Model as M
from rls.utils.indexs import OutputNetworkType
from rls.nn.networks import get_visual_network_from_type
from rls.nn.models import get_output_network_from_type
from rls.nn.networks import (MultiVectorNetwork,
MultiVisualNetwork,
EncoderNetwork,
MemoryNetwork)
from rls.utils.logging_utils import get_logger
logger = get_logger(__name__)
class RepresentationNetwork(ABC):
def __init__(self, name: str = 'test'):
self.name = name
self.h_dim = None
@abstractmethod
def __call__(self):
pass
@property
@abstractmethod
def trainable_variables(self):
pass
@property
@abstractmethod
def weights(self):
pass
@property
@abstractmethod
def _policy_models(self):
pass
@property
@abstractmethod
def _all_models(self):
pass
class DefaultRepresentationNetwork(RepresentationNetwork):
'''
visual_s -> visual_net -> feat ↘
feat -> encoder_net -> feat ↘ ↗ feat
s -> vector_net -> feat ↗ -> memory_net ->
cell_state ↗ ↘ cell_state
'''
def __init__(self,
name: str = 'test',
vec_dims=[],
vis_dims=[],
vector_net_kwargs: dict = {},
visual_net_kwargs: dict = {},
encoder_net_kwargs: dict = {},
memory_net_kwargs: dict = {}):
super().__init__(name)
self.vector_net = MultiVectorNetwork(vec_dims, **vector_net_kwargs)
logger.debug('initialize vector network successfully.')
self.visual_net = MultiVisualNetwork(vis_dims, **visual_net_kwargs)
logger.debug('initialize visual network successfully.')
encoder_dim = self.vector_net.h_dim + self.visual_net.h_dim
self.encoder_net = EncoderNetwork(encoder_dim, **encoder_net_kwargs)
logger.debug('initialize encoder network successfully.')
memory_dim = self.encoder_net.h_dim
self.memory_net = MemoryNetwork(memory_dim, **memory_net_kwargs)
logger.debug('initialize memory network successfully.')
self.h_dim = self.memory_net.h_dim
def split(self, batch_size, data):
'''TODO: Annotation
params:
batch_size: int
data: [B, x]
'''
if self.memory_net.use_rnn:
data = tf.reshape(data, [batch_size, -1, tf.shape(data)[-1]])
d, d_ = data[:, :-1], data[:, 1:]
d, d_ = tf.reshape(d, [-1, tf.shape(d)[-1]]), tf.reshape(d_, [-1, tf.shape(d_)[-1]])
return d, d_
else:
return tf.split(data, num_or_size_splits=2, axis=0)
# MASKED: __call__ function (lines 99-126)
def get_vis_feature(self, visual_s):
'''
params:
visual_s: [B, N, H, W, C]
return:
feat: [B, x]
'''
# TODO
viss = [visual_s[:, i] for i in range(visual_s.shape[1])]
return self.visual_net(*viss)
def get_vec_feature(self, s):
'''
params:
s: [B, x]
return:
feat: [B, y]
'''
return self.vector_net(s)
def get_encoder_feature(self, s, visual_s):
'''
params:
s: [B, x]
visual_s: [B, y]
return:
feat: [B, z]
'''
if self.vector_net.use_vector and self.visual_net.use_visual:
feat = self.get_vec_feature(s)
vis_feat = self.get_vis_feature(visual_s)
feat = tf.concat([feat, vis_feat], axis=-1)
elif self.visual_net.use_visual:
vis_feat = self.get_vis_feature(visual_s)
feat = vis_feat
else:
feat = self.get_vec_feature(s)
encoder_feature = self.encoder_net(feat)
return encoder_feature
@property
def trainable_variables(self):
tv = []
tv += self.vector_net.trainable_variables
tv += self.visual_net.trainable_variables
tv += self.encoder_net.trainable_variables
tv += self.memory_net.trainable_variables
return tv
@property
def weights(self):
ws = []
ws += self.vector_net.weights
ws += self.visual_net.weights
ws += self.encoder_net.weights
ws += self.memory_net.weights
return ws
@property
def _policy_models(self):
models = {}
models.update({self.name + '/' + 'vector_net': self.vector_net})
models.update({self.name + '/' + 'visual_net': self.visual_net})
models.update({self.name + '/' + 'encoder_net': self.encoder_net})
models.update({self.name + '/' + 'memory_net': self.memory_net})
return models
@property
def _all_models(self):
models = {}
models.update({self.name + '/' + 'vector_net': self.vector_net})
models.update({self.name + '/' + 'visual_net': self.visual_net})
models.update({self.name + '/' + 'encoder_net': self.encoder_net})
models.update({self.name + '/' + 'memory_net': self.memory_net})
return models
class ValueNetwork:
'''
feat -> value_net -> outputs
'''
def __init__(self,
name: str = 'test',
representation_net: RepresentationNetwork = None,
value_net_type: OutputNetworkType = None,
value_net_kwargs: dict = {}):
assert value_net_type is not None, 'assert value_net_type is not None'
super().__init__()
self.name = name
self.representation_net = representation_net
if self.representation_net is not None:
self.value_net = get_output_network_from_type(value_net_type)(
vector_dim=self.representation_net.h_dim, **value_net_kwargs)
else:
self.value_net = get_output_network_from_type(value_net_type)(
**value_net_kwargs)
def __call__(self, s, visual_s, *args, cell_state=(None,), **kwargs):
# feature [B, x]
assert self.representation_net is not None, 'self.representation_net is not None'
feat, cell_state = self.representation_net(s, visual_s, cell_state)
output = self.value_net(feat, *args, **kwargs)
return output, cell_state
def get_value(self, feat, *args, **kwargs):
output = self.value_net(feat, *args, **kwargs)
return output
@property
def trainable_variables(self):
tv = self.representation_net.trainable_variables if self.representation_net else []
tv += self.value_net.trainable_variables
return tv
@property
def weights(self):
ws = self.representation_net.weights if self.representation_net else []
ws += self.value_net.weights
return ws
@property
def _policy_models(self):
models = self.representation_net._policy_models if self.representation_net else {}
models.update({self.name + '/' + 'value_net': self.value_net})
return models
@property
def _all_models(self):
models = self.representation_net._all_models if self.representation_net else {}
models.update({self.name + '/' + 'value_net': self.value_net})
return models
class DoubleValueNetwork(ValueNetwork):
'''
↗ value_net1 -> outputs
feat
↘ value_net2 -> outputs
'''
def __init__(self,
name: str = 'test',
representation_net: RepresentationNetwork = None,
value_net_type: OutputNetworkType = None,
value_net_kwargs: dict = {}):
super().__init__(name, representation_net, value_net_type, value_net_kwargs)
if self.representation_net is not None:
self.value_net2 = get_output_network_from_type(value_net_type)(
vector_dim=self.representation_net.h_dim, **value_net_kwargs)
else:
self.value_net2 = get_output_network_from_type(value_net_type)(
**value_net_kwargs)
def __call__(self, s, visual_s, *args, cell_state=(None,), **kwargs):
# feature [B, x]
feat, cell_state = self.representation_net(s, visual_s, cell_state)
output = self.value_net(feat, *args, **kwargs)
output2 = self.value_net2(feat, *args, **kwargs)
return output, output2, cell_state
def get_value(self, feat, *args, **kwargs):
output = self.value_net(feat, *args, **kwargs)
output2 = self.value_net2(feat, *args, **kwargs)
return output, output2
def get_min(self, *args, **kwargs):
return tf.minimum(*self.get_value(*args, **kwargs))
def get_max(self, *args, **kwargs):
return tf.maximum(*self.get_value(*args, **kwargs))
@property
def trainable_variables(self):
return super().trainable_variables + self.value_net2.trainable_variables
@property
def weights(self):
return super().weights + self.value_net2.weights
@property
def _all_models(self):
models = super()._all_models
models.update({self.name + '/' + 'value_net2': self.value_net2})
return models
class ACNetwork(ValueNetwork):
'''
↗ policy_net -> outputs
feat
↘ value_net -> outputs
'''
def __init__(self,
name: str = 'test',
representation_net: RepresentationNetwork = None,
policy_net_type: OutputNetworkType = None,
policy_net_kwargs: dict = {},
value_net_type: OutputNetworkType = None,
value_net_kwargs: dict = {}):
super().__init__(name, representation_net, value_net_type, value_net_kwargs)
if self.representation_net is not None:
self.policy_net = get_output_network_from_type(policy_net_type)(
vector_dim=self.representation_net.h_dim, **policy_net_kwargs)
else:
self.policy_net = get_output_network_from_type(policy_net_type)(
**policy_net_kwargs)
def __call__(self, s, visual_s, *args, cell_state=(None,), **kwargs):
# feature [B, x]
feat, cell_state = self.representation_net(s, visual_s, cell_state)
output = self.policy_net(feat, *args, **kwargs)
return output, cell_state
@property
def actor_trainable_variables(self):
return self.policy_net.trainable_variables
@property
def critic_trainable_variables(self):
return super().trainable_variables
@property
def weights(self):
return super().weights + self.policy_net.weights
@property
def _policy_models(self):
        '''Overridden to also include the policy network.'''
models = super()._policy_models
models.update({self.name + '/' + 'policy_net': self.policy_net})
return models
@property
def _all_models(self):
models = super()._all_models
models.update({self.name + '/' + 'policy_net': self.policy_net})
return models
class ACCNetwork(ACNetwork):
'''
Use for PD-DDPG
↗ policy_net -> outputs
feat -> value_net -> outputs
↘ value_net2 -> outputs
'''
def __init__(self,
name: str = 'test',
representation_net: RepresentationNetwork = None,
policy_net_type: OutputNetworkType = None,
policy_net_kwargs: dict = {},
value_net_type: OutputNetworkType = None,
value_net_kwargs: dict = {},
value_net2_type: OutputNetworkType = None,
value_net2_kwargs: dict = {}):
super().__init__(name, representation_net,
policy_net_type, policy_net_kwargs,
value_net_type, value_net_kwargs)
if self.representation_net is not None:
self.value_net2 = get_output_network_from_type(value_net2_type)(
vector_dim=self.representation_net.h_dim, **value_net2_kwargs)
else:
self.value_net2 = get_output_network_from_type(value_net2_type)(
**value_net2_kwargs)
@property
def critic_trainable_variables(self):
return super().critic_trainable_variables + self.value_net2.trainable_variables
@property
def value_net_trainable_variables(self):
return super().critic_trainable_variables
@property
def value_net2_trainable_variables(self):
return self.value_net2.trainable_variables
@property
def weights(self):
return super().weights + self.value_net2.weights
@property
def _all_models(self):
models = super()._all_models
models.update({self.name + '/' + 'value_net2': self.value_net2})
return models
class ADoubleCNetwork(ACNetwork):
'''
↗ policy_net -> outputs
feat -> value_net -> outputs
↘ value_net2 -> outputs
'''
def __init__(self,
name: str = 'test',
representation_net: RepresentationNetwork = None,
policy_net_type: OutputNetworkType = None,
policy_net_kwargs: dict = {},
value_net_type: OutputNetworkType = None,
value_net_kwargs: dict = {}):
super().__init__(name, representation_net,
policy_net_type, policy_net_kwargs,
value_net_type, value_net_kwargs)
if self.representation_net is not None:
self.value_net2 = get_output_network_from_type(value_net_type)(
vector_dim=self.representation_net.h_dim, **value_net_kwargs)
else:
self.value_net2 = get_output_network_from_type(value_net_type)(
**value_net_kwargs)
def get_value(self, feat, *args, **kwargs):
output = self.value_net(feat, *args, **kwargs)
output2 = self.value_net2(feat, *args, **kwargs)
return output, output2
def get_min(self, *args, **kwargs):
return tf.minimum(*self.get_value(*args, **kwargs))
def get_max(self, *args, **kwargs):
return tf.maximum(*self.get_value(*args, **kwargs))
@property
def critic_trainable_variables(self):
return super().trainable_variables + self.value_net2.trainable_variables
@property
def weights(self):
return super().weights + self.value_net2.weights
@property
def _all_models(self):
models = super()._all_models
models.update({self.name + '/' + 'value_net2': self.value_net2})
return models
|
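The actor/critic variable split exposed above (actor_trainable_variables vs. critic_trainable_variables) is typically consumed by two separate optimizers, each updating only its own variable list. Below is a minimal, self-contained sketch of that pattern in plain TensorFlow 2; the tiny Dense layers and the losses are placeholders, not the networks defined in this module.
# Illustrative sketch only: stand-ins for policy_net / value_net and dummy losses.
import tensorflow as tf
actor = tf.keras.layers.Dense(2)     # placeholder for policy_net
critic = tf.keras.layers.Dense(1)    # placeholder for value_net
feat = tf.random.normal([8, 4])
actor(feat); critic(feat)            # build the variables
actor_opt, critic_opt = tf.keras.optimizers.Adam(1e-3), tf.keras.optimizers.Adam(1e-3)
with tf.GradientTape(persistent=True) as tape:
    actor_loss = -tf.reduce_mean(actor(feat))               # dummy actor objective
    critic_loss = tf.reduce_mean(tf.square(critic(feat)))   # dummy critic objective
actor_opt.apply_gradients(zip(tape.gradient(actor_loss, actor.trainable_variables),
                              actor.trainable_variables))
critic_opt.apply_gradients(zip(tape.gradient(critic_loss, critic.trainable_variables),
                               critic.trainable_variables))
del tape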
def __call__(self, s, visual_s, cell_state, *, need_split=False):
'''
params:
s: [B*T, x]
visual_s: [B*T, y]
cell_state: Tuple([B, z],)
return:
feat: [B, a]
cell_state: Tuple([B, z],)
'''
batch_size = tf.shape(s)[0]
if self.memory_net.use_rnn:
s = tf.reshape(s, [-1, tf.shape(s)[-1]]) # [B, T+1, N] => [B*(T+1), N]
if self.visual_net.use_visual:
visual_s = tf.reshape(visual_s, [-1, *tf.shape(visual_s)[2:]])
feat = self.get_encoder_feature(s, visual_s)
if self.memory_net.use_rnn:
# reshape feature from [B*T, x] to [B, T, x]
feat = tf.reshape(feat, (batch_size, -1, tf.shape(feat)[-1]))
feat, cell_state = self.memory_net(feat, *cell_state)
# reshape feature from [B, T, x] to [B*T, x]
feat = tf.reshape(feat, (-1, tf.shape(feat)[-1]))
if need_split:
feat = self.split(batch_size, feat)
return feat, cell_state
| 99 | 126 |
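When memory_net.use_rnn is set, the call above folds the flat feature batch into a [B, T, x] layout, runs the recurrent cell, and flattens the result back to [B*T, x]. A toy sketch of that reshape round-trip with assumed shapes, using a GRU as a stand-in for memory_net:
# Illustrative sketch only: toy shapes, GRU standing in for memory_net.
import tensorflow as tf
B, T, X, H = 4, 5, 8, 16
feat = tf.random.normal([B * T, X])                      # [B*T, x] encoder features
feat = tf.reshape(feat, (B, -1, X))                      # -> [B, T, x]
gru = tf.keras.layers.GRU(H, return_sequences=True, return_state=True)
feat, cell_state = gru(feat)                             # per-step outputs + final state
feat = tf.reshape(feat, (-1, H))                         # -> [B*T, H]
print(feat.shape, cell_state.shape)                      # (20, 16) (4, 16)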
import numpy as np
import tensorflow as tf
from copy import deepcopy
from abc import ABC, abstractmethod
from tensorflow.keras import Model as M
from rls.utils.indexs import OutputNetworkType
from rls.nn.networks import get_visual_network_from_type
from rls.nn.models import get_output_network_from_type
from rls.nn.networks import (MultiVectorNetwork,
MultiVisualNetwork,
EncoderNetwork,
MemoryNetwork)
from rls.utils.logging_utils import get_logger
logger = get_logger(__name__)
class RepresentationNetwork(ABC):
def __init__(self, name: str = 'test'):
self.name = name
self.h_dim = None
@abstractmethod
def __call__(self):
pass
@property
@abstractmethod
def trainable_variables(self):
pass
@property
@abstractmethod
def weights(self):
pass
@property
@abstractmethod
def _policy_models(self):
pass
@property
@abstractmethod
def _all_models(self):
pass
class DefaultRepresentationNetwork(RepresentationNetwork):
'''
visual_s -> visual_net -> feat ↘
feat -> encoder_net -> feat ↘ ↗ feat
s -> vector_net -> feat ↗ -> memory_net ->
cell_state ↗ ↘ cell_state
'''
def __init__(self,
name: str = 'test',
vec_dims=[],
vis_dims=[],
vector_net_kwargs: dict = {},
visual_net_kwargs: dict = {},
encoder_net_kwargs: dict = {},
memory_net_kwargs: dict = {}):
super().__init__(name)
self.vector_net = MultiVectorNetwork(vec_dims, **vector_net_kwargs)
logger.debug('initialize vector network successfully.')
self.visual_net = MultiVisualNetwork(vis_dims, **visual_net_kwargs)
logger.debug('initialize visual network successfully.')
encoder_dim = self.vector_net.h_dim + self.visual_net.h_dim
self.encoder_net = EncoderNetwork(encoder_dim, **encoder_net_kwargs)
logger.debug('initialize encoder network successfully.')
memory_dim = self.encoder_net.h_dim
self.memory_net = MemoryNetwork(memory_dim, **memory_net_kwargs)
logger.debug('initialize memory network successfully.')
self.h_dim = self.memory_net.h_dim
def split(self, batch_size, data):
'''TODO: Annotation
params:
batch_size: int
data: [B, x]
'''
if self.memory_net.use_rnn:
data = tf.reshape(data, [batch_size, -1, tf.shape(data)[-1]])
d, d_ = data[:, :-1], data[:, 1:]
d, d_ = tf.reshape(d, [-1, tf.shape(d)[-1]]), tf.reshape(d_, [-1, tf.shape(d_)[-1]])
return d, d_
else:
return tf.split(data, num_or_size_splits=2, axis=0)
def __call__(self, s, visual_s, cell_state, *, need_split=False):
'''
params:
s: [B*T, x]
visual_s: [B*T, y]
cell_state: Tuple([B, z],)
return:
feat: [B, a]
cell_state: Tuple([B, z],)
'''
batch_size = tf.shape(s)[0]
if self.memory_net.use_rnn:
s = tf.reshape(s, [-1, tf.shape(s)[-1]]) # [B, T+1, N] => [B*(T+1), N]
if self.visual_net.use_visual:
visual_s = tf.reshape(visual_s, [-1, *tf.shape(visual_s)[2:]])
feat = self.get_encoder_feature(s, visual_s)
if self.memory_net.use_rnn:
# reshape feature from [B*T, x] to [B, T, x]
feat = tf.reshape(feat, (batch_size, -1, tf.shape(feat)[-1]))
feat, cell_state = self.memory_net(feat, *cell_state)
# reshape feature from [B, T, x] to [B*T, x]
feat = tf.reshape(feat, (-1, tf.shape(feat)[-1]))
if need_split:
feat = self.split(batch_size, feat)
return feat, cell_state
def get_vis_feature(self, visual_s):
'''
params:
visual_s: [B, N, H, W, C]
return:
feat: [B, x]
'''
# TODO
viss = [visual_s[:, i] for i in range(visual_s.shape[1])]
return self.visual_net(*viss)
def get_vec_feature(self, s):
'''
params:
s: [B, x]
return:
feat: [B, y]
'''
return self.vector_net(s)
def get_encoder_feature(self, s, visual_s):
'''
params:
s: [B, x]
visual_s: [B, y]
return:
feat: [B, z]
'''
if self.vector_net.use_vector and self.visual_net.use_visual:
feat = self.get_vec_feature(s)
vis_feat = self.get_vis_feature(visual_s)
feat = tf.concat([feat, vis_feat], axis=-1)
elif self.visual_net.use_visual:
vis_feat = self.get_vis_feature(visual_s)
feat = vis_feat
else:
feat = self.get_vec_feature(s)
encoder_feature = self.encoder_net(feat)
return encoder_feature
@property
def trainable_variables(self):
tv = []
tv += self.vector_net.trainable_variables
tv += self.visual_net.trainable_variables
tv += self.encoder_net.trainable_variables
tv += self.memory_net.trainable_variables
return tv
@property
def weights(self):
ws = []
ws += self.vector_net.weights
ws += self.visual_net.weights
ws += self.encoder_net.weights
ws += self.memory_net.weights
return ws
@property
def _policy_models(self):
models = {}
models.update({self.name + '/' + 'vector_net': self.vector_net})
models.update({self.name + '/' + 'visual_net': self.visual_net})
models.update({self.name + '/' + 'encoder_net': self.encoder_net})
models.update({self.name + '/' + 'memory_net': self.memory_net})
return models
@property
def _all_models(self):
models = {}
models.update({self.name + '/' + 'vector_net': self.vector_net})
models.update({self.name + '/' + 'visual_net': self.visual_net})
models.update({self.name + '/' + 'encoder_net': self.encoder_net})
models.update({self.name + '/' + 'memory_net': self.memory_net})
return models
class ValueNetwork:
'''
feat -> value_net -> outputs
'''
def __init__(self,
name: str = 'test',
representation_net: RepresentationNetwork = None,
value_net_type: OutputNetworkType = None,
value_net_kwargs: dict = {}):
        assert value_net_type is not None, 'value_net_type must not be None'
super().__init__()
self.name = name
self.representation_net = representation_net
if self.representation_net is not None:
self.value_net = get_output_network_from_type(value_net_type)(
vector_dim=self.representation_net.h_dim, **value_net_kwargs)
else:
self.value_net = get_output_network_from_type(value_net_type)(
**value_net_kwargs)
def __call__(self, s, visual_s, *args, cell_state=(None,), **kwargs):
# feature [B, x]
        assert self.representation_net is not None, 'representation_net must not be None'
feat, cell_state = self.representation_net(s, visual_s, cell_state)
output = self.value_net(feat, *args, **kwargs)
return output, cell_state
def get_value(self, feat, *args, **kwargs):
output = self.value_net(feat, *args, **kwargs)
return output
@property
def trainable_variables(self):
tv = self.representation_net.trainable_variables if self.representation_net else []
tv += self.value_net.trainable_variables
return tv
@property
def weights(self):
ws = self.representation_net.weights if self.representation_net else []
ws += self.value_net.weights
return ws
@property
def _policy_models(self):
models = self.representation_net._policy_models if self.representation_net else {}
models.update({self.name + '/' + 'value_net': self.value_net})
return models
@property
def _all_models(self):
models = self.representation_net._all_models if self.representation_net else {}
models.update({self.name + '/' + 'value_net': self.value_net})
return models
class DoubleValueNetwork(ValueNetwork):
'''
↗ value_net1 -> outputs
feat
↘ value_net2 -> outputs
'''
def __init__(self,
name: str = 'test',
representation_net: RepresentationNetwork = None,
value_net_type: OutputNetworkType = None,
value_net_kwargs: dict = {}):
super().__init__(name, representation_net, value_net_type, value_net_kwargs)
if self.representation_net is not None:
self.value_net2 = get_output_network_from_type(value_net_type)(
vector_dim=self.representation_net.h_dim, **value_net_kwargs)
else:
self.value_net2 = get_output_network_from_type(value_net_type)(
**value_net_kwargs)
def __call__(self, s, visual_s, *args, cell_state=(None,), **kwargs):
# feature [B, x]
feat, cell_state = self.representation_net(s, visual_s, cell_state)
output = self.value_net(feat, *args, **kwargs)
output2 = self.value_net2(feat, *args, **kwargs)
return output, output2, cell_state
def get_value(self, feat, *args, **kwargs):
output = self.value_net(feat, *args, **kwargs)
output2 = self.value_net2(feat, *args, **kwargs)
return output, output2
def get_min(self, *args, **kwargs):
return tf.minimum(*self.get_value(*args, **kwargs))
def get_max(self, *args, **kwargs):
return tf.maximum(*self.get_value(*args, **kwargs))
@property
def trainable_variables(self):
return super().trainable_variables + self.value_net2.trainable_variables
@property
def weights(self):
return super().weights + self.value_net2.weights
@property
def _all_models(self):
models = super()._all_models
models.update({self.name + '/' + 'value_net2': self.value_net2})
return models
class ACNetwork(ValueNetwork):
'''
↗ policy_net -> outputs
feat
↘ value_net -> outputs
'''
def __init__(self,
name: str = 'test',
representation_net: RepresentationNetwork = None,
policy_net_type: OutputNetworkType = None,
policy_net_kwargs: dict = {},
value_net_type: OutputNetworkType = None,
value_net_kwargs: dict = {}):
super().__init__(name, representation_net, value_net_type, value_net_kwargs)
if self.representation_net is not None:
self.policy_net = get_output_network_from_type(policy_net_type)(
vector_dim=self.representation_net.h_dim, **policy_net_kwargs)
else:
self.policy_net = get_output_network_from_type(policy_net_type)(
**policy_net_kwargs)
def __call__(self, s, visual_s, *args, cell_state=(None,), **kwargs):
# feature [B, x]
feat, cell_state = self.representation_net(s, visual_s, cell_state)
output = self.policy_net(feat, *args, **kwargs)
return output, cell_state
@property
def actor_trainable_variables(self):
return self.policy_net.trainable_variables
@property
def critic_trainable_variables(self):
return super().trainable_variables
@property
def weights(self):
return super().weights + self.policy_net.weights
@property
def _policy_models(self):
        '''Override: add the policy network to the returned models.'''
models = super()._policy_models
models.update({self.name + '/' + 'policy_net': self.policy_net})
return models
@property
def _all_models(self):
models = super()._all_models
models.update({self.name + '/' + 'policy_net': self.policy_net})
return models
class ACCNetwork(ACNetwork):
'''
Use for PD-DDPG
↗ policy_net -> outputs
feat -> value_net -> outputs
↘ value_net2 -> outputs
'''
def __init__(self,
name: str = 'test',
representation_net: RepresentationNetwork = None,
policy_net_type: OutputNetworkType = None,
policy_net_kwargs: dict = {},
value_net_type: OutputNetworkType = None,
value_net_kwargs: dict = {},
value_net2_type: OutputNetworkType = None,
value_net2_kwargs: dict = {}):
super().__init__(name, representation_net,
policy_net_type, policy_net_kwargs,
value_net_type, value_net_kwargs)
if self.representation_net is not None:
self.value_net2 = get_output_network_from_type(value_net2_type)(
vector_dim=self.representation_net.h_dim, **value_net2_kwargs)
else:
self.value_net2 = get_output_network_from_type(value_net2_type)(
**value_net2_kwargs)
@property
def critic_trainable_variables(self):
return super().critic_trainable_variables + self.value_net2.trainable_variables
@property
def value_net_trainable_variables(self):
return super().critic_trainable_variables
@property
def value_net2_trainable_variables(self):
return self.value_net2.trainable_variables
@property
def weights(self):
return super().weights + self.value_net2.weights
@property
def _all_models(self):
models = super()._all_models
models.update({self.name + '/' + 'value_net2': self.value_net2})
return models
class ADoubleCNetwork(ACNetwork):
'''
↗ policy_net -> outputs
feat -> value_net -> outputs
↘ value_net2 -> outputs
'''
def __init__(self,
name: str = 'test',
representation_net: RepresentationNetwork = None,
policy_net_type: OutputNetworkType = None,
policy_net_kwargs: dict = {},
value_net_type: OutputNetworkType = None,
value_net_kwargs: dict = {}):
super().__init__(name, representation_net,
policy_net_type, policy_net_kwargs,
value_net_type, value_net_kwargs)
if self.representation_net is not None:
self.value_net2 = get_output_network_from_type(value_net_type)(
vector_dim=self.representation_net.h_dim, **value_net_kwargs)
else:
self.value_net2 = get_output_network_from_type(value_net_type)(
**value_net_kwargs)
def get_value(self, feat, *args, **kwargs):
output = self.value_net(feat, *args, **kwargs)
output2 = self.value_net2(feat, *args, **kwargs)
return output, output2
def get_min(self, *args, **kwargs):
return tf.minimum(*self.get_value(*args, **kwargs))
def get_max(self, *args, **kwargs):
return tf.maximum(*self.get_value(*args, **kwargs))
@property
def critic_trainable_variables(self):
return super().trainable_variables + self.value_net2.trainable_variables
@property
def weights(self):
return super().weights + self.value_net2.weights
@property
def _all_models(self):
models = super()._all_models
models.update({self.name + '/' + 'value_net2': self.value_net2})
return models
|
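DoubleValueNetwork and ADoubleCNetwork both keep two critics over the same feature and take the element-wise minimum of their outputs in get_min, the usual clipped double-Q trick. A minimal sketch with placeholder Dense critics:
# Illustrative sketch only: two placeholder critics over a shared feature.
import tensorflow as tf
q1, q2 = tf.keras.layers.Dense(1), tf.keras.layers.Dense(1)
feat = tf.random.normal([8, 16])
q_min = tf.minimum(q1(feat), q2(feat))   # pessimistic target, as in get_min
q_max = tf.maximum(q1(feat), q2(feat))   # as in get_max
print(q_min.shape, q_max.shape)          # (8, 1) (8, 1)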
get_encoder_feature
|
params:
s: [B, x]
visual_s: [B, y]
return:
feat: [B, z]
|
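The docstring above belongs to get_encoder_feature, which concatenates the vector and visual features before passing them through the dense encoder. A minimal stand-alone sketch of that concatenation step, with placeholder feature tensors and a Dense layer standing in for encoder_net:
# Illustrative sketch only: placeholder features and encoder.
import tensorflow as tf
vec_feat = tf.random.normal([8, 32])                          # stand-in for vector_net(s)
vis_feat = tf.random.normal([8, 64])                          # stand-in for visual_net(visual_s)
encoder_net = tf.keras.layers.Dense(128, activation='relu')   # stand-in for EncoderNetwork
feat = tf.concat([vec_feat, vis_feat], axis=-1)               # [8, 96]
encoder_feature = encoder_net(feat)                           # [8, 128]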
import numpy as np
import tensorflow as tf
from copy import deepcopy
from abc import ABC, abstractmethod
from tensorflow.keras import Model as M
from rls.utils.indexs import OutputNetworkType
from rls.nn.networks import get_visual_network_from_type
from rls.nn.models import get_output_network_from_type
from rls.nn.networks import (MultiVectorNetwork,
MultiVisualNetwork,
EncoderNetwork,
MemoryNetwork)
from rls.utils.logging_utils import get_logger
logger = get_logger(__name__)
class RepresentationNetwork(ABC):
def __init__(self, name: str = 'test'):
self.name = name
self.h_dim = None
@abstractmethod
def __call__(self):
pass
@property
@abstractmethod
def trainable_variables(self):
pass
@property
@abstractmethod
def weights(self):
pass
@property
@abstractmethod
def _policy_models(self):
pass
@property
@abstractmethod
def _all_models(self):
pass
class DefaultRepresentationNetwork(RepresentationNetwork):
'''
visual_s -> visual_net -> feat ↘
feat -> encoder_net -> feat ↘ ↗ feat
s -> vector_net -> feat ↗ -> memory_net ->
cell_state ↗ ↘ cell_state
'''
def __init__(self,
name: str = 'test',
vec_dims=[],
vis_dims=[],
vector_net_kwargs: dict = {},
visual_net_kwargs: dict = {},
encoder_net_kwargs: dict = {},
memory_net_kwargs: dict = {}):
super().__init__(name)
self.vector_net = MultiVectorNetwork(vec_dims, **vector_net_kwargs)
logger.debug('initialize vector network successfully.')
self.visual_net = MultiVisualNetwork(vis_dims, **visual_net_kwargs)
logger.debug('initialize visual network successfully.')
encoder_dim = self.vector_net.h_dim + self.visual_net.h_dim
self.encoder_net = EncoderNetwork(encoder_dim, **encoder_net_kwargs)
logger.debug('initialize encoder network successfully.')
memory_dim = self.encoder_net.h_dim
self.memory_net = MemoryNetwork(memory_dim, **memory_net_kwargs)
logger.debug('initialize memory network successfully.')
self.h_dim = self.memory_net.h_dim
def split(self, batch_size, data):
'''TODO: Annotation
params:
batch_size: int
data: [B, x]
'''
if self.memory_net.use_rnn:
data = tf.reshape(data, [batch_size, -1, tf.shape(data)[-1]])
d, d_ = data[:, :-1], data[:, 1:]
d, d_ = tf.reshape(d, [-1, tf.shape(d)[-1]]), tf.reshape(d_, [-1, tf.shape(d_)[-1]])
return d, d_
else:
return tf.split(data, num_or_size_splits=2, axis=0)
def __call__(self, s, visual_s, cell_state, *, need_split=False):
'''
params:
s: [B*T, x]
visual_s: [B*T, y]
cell_state: Tuple([B, z],)
return:
feat: [B, a]
cell_state: Tuple([B, z],)
'''
batch_size = tf.shape(s)[0]
if self.memory_net.use_rnn:
s = tf.reshape(s, [-1, tf.shape(s)[-1]]) # [B, T+1, N] => [B*(T+1), N]
if self.visual_net.use_visual:
visual_s = tf.reshape(visual_s, [-1, *tf.shape(visual_s)[2:]])
feat = self.get_encoder_feature(s, visual_s)
if self.memory_net.use_rnn:
# reshape feature from [B*T, x] to [B, T, x]
feat = tf.reshape(feat, (batch_size, -1, tf.shape(feat)[-1]))
feat, cell_state = self.memory_net(feat, *cell_state)
# reshape feature from [B, T, x] to [B*T, x]
feat = tf.reshape(feat, (-1, tf.shape(feat)[-1]))
if need_split:
feat = self.split(batch_size, feat)
return feat, cell_state
def get_vis_feature(self, visual_s):
'''
params:
visual_s: [B, N, H, W, C]
return:
feat: [B, x]
'''
# TODO
viss = [visual_s[:, i] for i in range(visual_s.shape[1])]
return self.visual_net(*viss)
def get_vec_feature(self, s):
'''
params:
s: [B, x]
return:
feat: [B, y]
'''
return self.vector_net(s)
# MASKED: get_encoder_feature function (lines 148-168)
@property
def trainable_variables(self):
tv = []
tv += self.vector_net.trainable_variables
tv += self.visual_net.trainable_variables
tv += self.encoder_net.trainable_variables
tv += self.memory_net.trainable_variables
return tv
@property
def weights(self):
ws = []
ws += self.vector_net.weights
ws += self.visual_net.weights
ws += self.encoder_net.weights
ws += self.memory_net.weights
return ws
@property
def _policy_models(self):
models = {}
models.update({self.name + '/' + 'vector_net': self.vector_net})
models.update({self.name + '/' + 'visual_net': self.visual_net})
models.update({self.name + '/' + 'encoder_net': self.encoder_net})
models.update({self.name + '/' + 'memory_net': self.memory_net})
return models
@property
def _all_models(self):
models = {}
models.update({self.name + '/' + 'vector_net': self.vector_net})
models.update({self.name + '/' + 'visual_net': self.visual_net})
models.update({self.name + '/' + 'encoder_net': self.encoder_net})
models.update({self.name + '/' + 'memory_net': self.memory_net})
return models
class ValueNetwork:
'''
feat -> value_net -> outputs
'''
def __init__(self,
name: str = 'test',
representation_net: RepresentationNetwork = None,
value_net_type: OutputNetworkType = None,
value_net_kwargs: dict = {}):
        assert value_net_type is not None, 'value_net_type must not be None'
super().__init__()
self.name = name
self.representation_net = representation_net
if self.representation_net is not None:
self.value_net = get_output_network_from_type(value_net_type)(
vector_dim=self.representation_net.h_dim, **value_net_kwargs)
else:
self.value_net = get_output_network_from_type(value_net_type)(
**value_net_kwargs)
def __call__(self, s, visual_s, *args, cell_state=(None,), **kwargs):
# feature [B, x]
        assert self.representation_net is not None, 'representation_net must not be None'
feat, cell_state = self.representation_net(s, visual_s, cell_state)
output = self.value_net(feat, *args, **kwargs)
return output, cell_state
def get_value(self, feat, *args, **kwargs):
output = self.value_net(feat, *args, **kwargs)
return output
@property
def trainable_variables(self):
tv = self.representation_net.trainable_variables if self.representation_net else []
tv += self.value_net.trainable_variables
return tv
@property
def weights(self):
ws = self.representation_net.weights if self.representation_net else []
ws += self.value_net.weights
return ws
@property
def _policy_models(self):
models = self.representation_net._policy_models if self.representation_net else {}
models.update({self.name + '/' + 'value_net': self.value_net})
return models
@property
def _all_models(self):
models = self.representation_net._all_models if self.representation_net else {}
models.update({self.name + '/' + 'value_net': self.value_net})
return models
class DoubleValueNetwork(ValueNetwork):
'''
↗ value_net1 -> outputs
feat
↘ value_net2 -> outputs
'''
def __init__(self,
name: str = 'test',
representation_net: RepresentationNetwork = None,
value_net_type: OutputNetworkType = None,
value_net_kwargs: dict = {}):
super().__init__(name, representation_net, value_net_type, value_net_kwargs)
if self.representation_net is not None:
self.value_net2 = get_output_network_from_type(value_net_type)(
vector_dim=self.representation_net.h_dim, **value_net_kwargs)
else:
self.value_net2 = get_output_network_from_type(value_net_type)(
**value_net_kwargs)
def __call__(self, s, visual_s, *args, cell_state=(None,), **kwargs):
# feature [B, x]
feat, cell_state = self.representation_net(s, visual_s, cell_state)
output = self.value_net(feat, *args, **kwargs)
output2 = self.value_net2(feat, *args, **kwargs)
return output, output2, cell_state
def get_value(self, feat, *args, **kwargs):
output = self.value_net(feat, *args, **kwargs)
output2 = self.value_net2(feat, *args, **kwargs)
return output, output2
def get_min(self, *args, **kwargs):
return tf.minimum(*self.get_value(*args, **kwargs))
def get_max(self, *args, **kwargs):
return tf.maximum(*self.get_value(*args, **kwargs))
@property
def trainable_variables(self):
return super().trainable_variables + self.value_net2.trainable_variables
@property
def weights(self):
return super().weights + self.value_net2.weights
@property
def _all_models(self):
models = super()._all_models
models.update({self.name + '/' + 'value_net2': self.value_net2})
return models
class ACNetwork(ValueNetwork):
'''
↗ policy_net -> outputs
feat
↘ value_net -> outputs
'''
def __init__(self,
name: str = 'test',
representation_net: RepresentationNetwork = None,
policy_net_type: OutputNetworkType = None,
policy_net_kwargs: dict = {},
value_net_type: OutputNetworkType = None,
value_net_kwargs: dict = {}):
super().__init__(name, representation_net, value_net_type, value_net_kwargs)
if self.representation_net is not None:
self.policy_net = get_output_network_from_type(policy_net_type)(
vector_dim=self.representation_net.h_dim, **policy_net_kwargs)
else:
self.policy_net = get_output_network_from_type(policy_net_type)(
**policy_net_kwargs)
def __call__(self, s, visual_s, *args, cell_state=(None,), **kwargs):
# feature [B, x]
feat, cell_state = self.representation_net(s, visual_s, cell_state)
output = self.policy_net(feat, *args, **kwargs)
return output, cell_state
@property
def actor_trainable_variables(self):
return self.policy_net.trainable_variables
@property
def critic_trainable_variables(self):
return super().trainable_variables
@property
def weights(self):
return super().weights + self.policy_net.weights
@property
def _policy_models(self):
        '''Override: add the policy network to the returned models.'''
models = super()._policy_models
models.update({self.name + '/' + 'policy_net': self.policy_net})
return models
@property
def _all_models(self):
models = super()._all_models
models.update({self.name + '/' + 'policy_net': self.policy_net})
return models
class ACCNetwork(ACNetwork):
'''
Use for PD-DDPG
↗ policy_net -> outputs
feat -> value_net -> outputs
↘ value_net2 -> outputs
'''
def __init__(self,
name: str = 'test',
representation_net: RepresentationNetwork = None,
policy_net_type: OutputNetworkType = None,
policy_net_kwargs: dict = {},
value_net_type: OutputNetworkType = None,
value_net_kwargs: dict = {},
value_net2_type: OutputNetworkType = None,
value_net2_kwargs: dict = {}):
super().__init__(name, representation_net,
policy_net_type, policy_net_kwargs,
value_net_type, value_net_kwargs)
if self.representation_net is not None:
self.value_net2 = get_output_network_from_type(value_net2_type)(
vector_dim=self.representation_net.h_dim, **value_net2_kwargs)
else:
self.value_net2 = get_output_network_from_type(value_net2_type)(
**value_net2_kwargs)
@property
def critic_trainable_variables(self):
return super().critic_trainable_variables + self.value_net2.trainable_variables
@property
def value_net_trainable_variables(self):
return super().critic_trainable_variables
@property
def value_net2_trainable_variables(self):
return self.value_net2.trainable_variables
@property
def weights(self):
return super().weights + self.value_net2.weights
@property
def _all_models(self):
models = super()._all_models
models.update({self.name + '/' + 'value_net2': self.value_net2})
return models
class ADoubleCNetwork(ACNetwork):
'''
↗ policy_net -> outputs
feat -> value_net -> outputs
↘ value_net2 -> outputs
'''
def __init__(self,
name: str = 'test',
representation_net: RepresentationNetwork = None,
policy_net_type: OutputNetworkType = None,
policy_net_kwargs: dict = {},
value_net_type: OutputNetworkType = None,
value_net_kwargs: dict = {}):
super().__init__(name, representation_net,
policy_net_type, policy_net_kwargs,
value_net_type, value_net_kwargs)
if self.representation_net is not None:
self.value_net2 = get_output_network_from_type(value_net_type)(
vector_dim=self.representation_net.h_dim, **value_net_kwargs)
else:
self.value_net2 = get_output_network_from_type(value_net_type)(
**value_net_kwargs)
def get_value(self, feat, *args, **kwargs):
output = self.value_net(feat, *args, **kwargs)
output2 = self.value_net2(feat, *args, **kwargs)
return output, output2
def get_min(self, *args, **kwargs):
return tf.minimum(*self.get_value(*args, **kwargs))
def get_max(self, *args, **kwargs):
return tf.maximum(*self.get_value(*args, **kwargs))
@property
def critic_trainable_variables(self):
return super().trainable_variables + self.value_net2.trainable_variables
@property
def weights(self):
return super().weights + self.value_net2.weights
@property
def _all_models(self):
models = super()._all_models
models.update({self.name + '/' + 'value_net2': self.value_net2})
return models
|
def get_encoder_feature(self, s, visual_s):
'''
params:
s: [B, x]
visual_s: [B, y]
return:
feat: [B, z]
'''
if self.vector_net.use_vector and self.visual_net.use_visual:
feat = self.get_vec_feature(s)
vis_feat = self.get_vis_feature(visual_s)
feat = tf.concat([feat, vis_feat], axis=-1)
elif self.visual_net.use_visual:
vis_feat = self.get_vis_feature(visual_s)
feat = vis_feat
else:
feat = self.get_vec_feature(s)
encoder_feature = self.encoder_net(feat)
return encoder_feature
| 148 | 168 |
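get_vis_feature above unpacks a stacked [B, N, H, W, C] observation into N per-camera tensors before calling the visual network. A toy illustration with assumed shapes:
# Illustrative sketch only: assumed toy shapes.
import tensorflow as tf
B, N, H, W, C = 2, 3, 8, 8, 3
visual_s = tf.random.normal([B, N, H, W, C])
viss = [visual_s[:, i] for i in range(visual_s.shape[1])]   # N tensors of [B, H, W, C]
print(len(viss), viss[0].shape)                             # 3 (2, 8, 8, 3)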
import numpy as np
import tensorflow as tf
from copy import deepcopy
from abc import ABC, abstractmethod
from tensorflow.keras import Model as M
from rls.utils.indexs import OutputNetworkType
from rls.nn.networks import get_visual_network_from_type
from rls.nn.models import get_output_network_from_type
from rls.nn.networks import (MultiVectorNetwork,
MultiVisualNetwork,
EncoderNetwork,
MemoryNetwork)
from rls.utils.logging_utils import get_logger
logger = get_logger(__name__)
class RepresentationNetwork(ABC):
def __init__(self, name: str = 'test'):
self.name = name
self.h_dim = None
@abstractmethod
def __call__(self):
pass
@property
@abstractmethod
def trainable_variables(self):
pass
@property
@abstractmethod
def weights(self):
pass
@property
@abstractmethod
def _policy_models(self):
pass
@property
@abstractmethod
def _all_models(self):
pass
class DefaultRepresentationNetwork(RepresentationNetwork):
'''
visual_s -> visual_net -> feat ↘
feat -> encoder_net -> feat ↘ ↗ feat
s -> vector_net -> feat ↗ -> memory_net ->
cell_state ↗ ↘ cell_state
'''
def __init__(self,
name: str = 'test',
vec_dims=[],
vis_dims=[],
vector_net_kwargs: dict = {},
visual_net_kwargs: dict = {},
encoder_net_kwargs: dict = {},
memory_net_kwargs: dict = {}):
super().__init__(name)
self.vector_net = MultiVectorNetwork(vec_dims, **vector_net_kwargs)
logger.debug('initialize vector network successfully.')
self.visual_net = MultiVisualNetwork(vis_dims, **visual_net_kwargs)
logger.debug('initialize visual network successfully.')
encoder_dim = self.vector_net.h_dim + self.visual_net.h_dim
self.encoder_net = EncoderNetwork(encoder_dim, **encoder_net_kwargs)
logger.debug('initialize encoder network successfully.')
memory_dim = self.encoder_net.h_dim
self.memory_net = MemoryNetwork(memory_dim, **memory_net_kwargs)
logger.debug('initialize memory network successfully.')
self.h_dim = self.memory_net.h_dim
def split(self, batch_size, data):
'''TODO: Annotation
params:
batch_size: int
data: [B, x]
'''
if self.memory_net.use_rnn:
data = tf.reshape(data, [batch_size, -1, tf.shape(data)[-1]])
d, d_ = data[:, :-1], data[:, 1:]
d, d_ = tf.reshape(d, [-1, tf.shape(d)[-1]]), tf.reshape(d_, [-1, tf.shape(d_)[-1]])
return d, d_
else:
return tf.split(data, num_or_size_splits=2, axis=0)
def __call__(self, s, visual_s, cell_state, *, need_split=False):
'''
params:
s: [B*T, x]
visual_s: [B*T, y]
cell_state: Tuple([B, z],)
return:
feat: [B, a]
cell_state: Tuple([B, z],)
'''
batch_size = tf.shape(s)[0]
if self.memory_net.use_rnn:
s = tf.reshape(s, [-1, tf.shape(s)[-1]]) # [B, T+1, N] => [B*(T+1), N]
if self.visual_net.use_visual:
visual_s = tf.reshape(visual_s, [-1, *tf.shape(visual_s)[2:]])
feat = self.get_encoder_feature(s, visual_s)
if self.memory_net.use_rnn:
# reshape feature from [B*T, x] to [B, T, x]
feat = tf.reshape(feat, (batch_size, -1, tf.shape(feat)[-1]))
feat, cell_state = self.memory_net(feat, *cell_state)
# reshape feature from [B, T, x] to [B*T, x]
feat = tf.reshape(feat, (-1, tf.shape(feat)[-1]))
if need_split:
feat = self.split(batch_size, feat)
return feat, cell_state
def get_vis_feature(self, visual_s):
'''
params:
visual_s: [B, N, H, W, C]
return:
feat: [B, x]
'''
# TODO
viss = [visual_s[:, i] for i in range(visual_s.shape[1])]
return self.visual_net(*viss)
def get_vec_feature(self, s):
'''
params:
s: [B, x]
return:
feat: [B, y]
'''
return self.vector_net(s)
def get_encoder_feature(self, s, visual_s):
'''
params:
s: [B, x]
visual_s: [B, y]
return:
feat: [B, z]
'''
if self.vector_net.use_vector and self.visual_net.use_visual:
feat = self.get_vec_feature(s)
vis_feat = self.get_vis_feature(visual_s)
feat = tf.concat([feat, vis_feat], axis=-1)
elif self.visual_net.use_visual:
vis_feat = self.get_vis_feature(visual_s)
feat = vis_feat
else:
feat = self.get_vec_feature(s)
encoder_feature = self.encoder_net(feat)
return encoder_feature
@property
def trainable_variables(self):
tv = []
tv += self.vector_net.trainable_variables
tv += self.visual_net.trainable_variables
tv += self.encoder_net.trainable_variables
tv += self.memory_net.trainable_variables
return tv
@property
def weights(self):
ws = []
ws += self.vector_net.weights
ws += self.visual_net.weights
ws += self.encoder_net.weights
ws += self.memory_net.weights
return ws
@property
def _policy_models(self):
models = {}
models.update({self.name + '/' + 'vector_net': self.vector_net})
models.update({self.name + '/' + 'visual_net': self.visual_net})
models.update({self.name + '/' + 'encoder_net': self.encoder_net})
models.update({self.name + '/' + 'memory_net': self.memory_net})
return models
@property
def _all_models(self):
models = {}
models.update({self.name + '/' + 'vector_net': self.vector_net})
models.update({self.name + '/' + 'visual_net': self.visual_net})
models.update({self.name + '/' + 'encoder_net': self.encoder_net})
models.update({self.name + '/' + 'memory_net': self.memory_net})
return models
class ValueNetwork:
'''
feat -> value_net -> outputs
'''
def __init__(self,
name: str = 'test',
representation_net: RepresentationNetwork = None,
value_net_type: OutputNetworkType = None,
value_net_kwargs: dict = {}):
        assert value_net_type is not None, 'value_net_type must not be None'
super().__init__()
self.name = name
self.representation_net = representation_net
if self.representation_net is not None:
self.value_net = get_output_network_from_type(value_net_type)(
vector_dim=self.representation_net.h_dim, **value_net_kwargs)
else:
self.value_net = get_output_network_from_type(value_net_type)(
**value_net_kwargs)
def __call__(self, s, visual_s, *args, cell_state=(None,), **kwargs):
# feature [B, x]
        assert self.representation_net is not None, 'representation_net must not be None'
feat, cell_state = self.representation_net(s, visual_s, cell_state)
output = self.value_net(feat, *args, **kwargs)
return output, cell_state
def get_value(self, feat, *args, **kwargs):
output = self.value_net(feat, *args, **kwargs)
return output
@property
def trainable_variables(self):
tv = self.representation_net.trainable_variables if self.representation_net else []
tv += self.value_net.trainable_variables
return tv
@property
def weights(self):
ws = self.representation_net.weights if self.representation_net else []
ws += self.value_net.weights
return ws
@property
def _policy_models(self):
models = self.representation_net._policy_models if self.representation_net else {}
models.update({self.name + '/' + 'value_net': self.value_net})
return models
@property
def _all_models(self):
models = self.representation_net._all_models if self.representation_net else {}
models.update({self.name + '/' + 'value_net': self.value_net})
return models
class DoubleValueNetwork(ValueNetwork):
'''
↗ value_net1 -> outputs
feat
↘ value_net2 -> outputs
'''
def __init__(self,
name: str = 'test',
representation_net: RepresentationNetwork = None,
value_net_type: OutputNetworkType = None,
value_net_kwargs: dict = {}):
super().__init__(name, representation_net, value_net_type, value_net_kwargs)
if self.representation_net is not None:
self.value_net2 = get_output_network_from_type(value_net_type)(
vector_dim=self.representation_net.h_dim, **value_net_kwargs)
else:
self.value_net2 = get_output_network_from_type(value_net_type)(
**value_net_kwargs)
def __call__(self, s, visual_s, *args, cell_state=(None,), **kwargs):
# feature [B, x]
feat, cell_state = self.representation_net(s, visual_s, cell_state)
output = self.value_net(feat, *args, **kwargs)
output2 = self.value_net2(feat, *args, **kwargs)
return output, output2, cell_state
def get_value(self, feat, *args, **kwargs):
output = self.value_net(feat, *args, **kwargs)
output2 = self.value_net2(feat, *args, **kwargs)
return output, output2
def get_min(self, *args, **kwargs):
return tf.minimum(*self.get_value(*args, **kwargs))
def get_max(self, *args, **kwargs):
return tf.maximum(*self.get_value(*args, **kwargs))
@property
def trainable_variables(self):
return super().trainable_variables + self.value_net2.trainable_variables
@property
def weights(self):
return super().weights + self.value_net2.weights
@property
def _all_models(self):
models = super()._all_models
models.update({self.name + '/' + 'value_net2': self.value_net2})
return models
class ACNetwork(ValueNetwork):
'''
↗ policy_net -> outputs
feat
↘ value_net -> outputs
'''
def __init__(self,
name: str = 'test',
representation_net: RepresentationNetwork = None,
policy_net_type: OutputNetworkType = None,
policy_net_kwargs: dict = {},
value_net_type: OutputNetworkType = None,
value_net_kwargs: dict = {}):
super().__init__(name, representation_net, value_net_type, value_net_kwargs)
if self.representation_net is not None:
self.policy_net = get_output_network_from_type(policy_net_type)(
vector_dim=self.representation_net.h_dim, **policy_net_kwargs)
else:
self.policy_net = get_output_network_from_type(policy_net_type)(
**policy_net_kwargs)
def __call__(self, s, visual_s, *args, cell_state=(None,), **kwargs):
# feature [B, x]
feat, cell_state = self.representation_net(s, visual_s, cell_state)
output = self.policy_net(feat, *args, **kwargs)
return output, cell_state
@property
def actor_trainable_variables(self):
return self.policy_net.trainable_variables
@property
def critic_trainable_variables(self):
return super().trainable_variables
@property
def weights(self):
return super().weights + self.policy_net.weights
@property
def _policy_models(self):
        '''Override: add the policy network to the returned models.'''
models = super()._policy_models
models.update({self.name + '/' + 'policy_net': self.policy_net})
return models
@property
def _all_models(self):
models = super()._all_models
models.update({self.name + '/' + 'policy_net': self.policy_net})
return models
class ACCNetwork(ACNetwork):
'''
Use for PD-DDPG
↗ policy_net -> outputs
feat -> value_net -> outputs
↘ value_net2 -> outputs
'''
def __init__(self,
name: str = 'test',
representation_net: RepresentationNetwork = None,
policy_net_type: OutputNetworkType = None,
policy_net_kwargs: dict = {},
value_net_type: OutputNetworkType = None,
value_net_kwargs: dict = {},
value_net2_type: OutputNetworkType = None,
value_net2_kwargs: dict = {}):
super().__init__(name, representation_net,
policy_net_type, policy_net_kwargs,
value_net_type, value_net_kwargs)
if self.representation_net is not None:
self.value_net2 = get_output_network_from_type(value_net2_type)(
vector_dim=self.representation_net.h_dim, **value_net2_kwargs)
else:
self.value_net2 = get_output_network_from_type(value_net2_type)(
**value_net2_kwargs)
@property
def critic_trainable_variables(self):
return super().critic_trainable_variables + self.value_net2.trainable_variables
@property
def value_net_trainable_variables(self):
return super().critic_trainable_variables
@property
def value_net2_trainable_variables(self):
return self.value_net2.trainable_variables
@property
def weights(self):
return super().weights + self.value_net2.weights
@property
def _all_models(self):
models = super()._all_models
models.update({self.name + '/' + 'value_net2': self.value_net2})
return models
class ADoubleCNetwork(ACNetwork):
'''
↗ policy_net -> outputs
feat -> value_net -> outputs
↘ value_net2 -> outputs
'''
def __init__(self,
name: str = 'test',
representation_net: RepresentationNetwork = None,
policy_net_type: OutputNetworkType = None,
policy_net_kwargs: dict = {},
value_net_type: OutputNetworkType = None,
value_net_kwargs: dict = {}):
super().__init__(name, representation_net,
policy_net_type, policy_net_kwargs,
value_net_type, value_net_kwargs)
if self.representation_net is not None:
self.value_net2 = get_output_network_from_type(value_net_type)(
vector_dim=self.representation_net.h_dim, **value_net_kwargs)
else:
self.value_net2 = get_output_network_from_type(value_net_type)(
**value_net_kwargs)
def get_value(self, feat, *args, **kwargs):
output = self.value_net(feat, *args, **kwargs)
output2 = self.value_net2(feat, *args, **kwargs)
return output, output2
def get_min(self, *args, **kwargs):
return tf.minimum(*self.get_value(*args, **kwargs))
def get_max(self, *args, **kwargs):
return tf.maximum(*self.get_value(*args, **kwargs))
@property
def critic_trainable_variables(self):
return super().trainable_variables + self.value_net2.trainable_variables
@property
def weights(self):
return super().weights + self.value_net2.weights
@property
def _all_models(self):
models = super()._all_models
models.update({self.name + '/' + 'value_net2': self.value_net2})
return models
|
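The split() helper above, in its RNN branch, turns a [B, T+1, x] feature sequence into aligned current-step and next-step batches by dropping the last and the first timestep respectively. A toy illustration with assumed shapes:
# Illustrative sketch only: assumed toy shapes.
import tensorflow as tf
B, T1, X = 2, 4, 3                                   # T1 = T + 1 stacked timesteps
data = tf.reshape(tf.range(B * T1 * X, dtype=tf.float32), [B * T1, X])
data = tf.reshape(data, [B, -1, X])                  # [B, T+1, x]
d, d_ = data[:, :-1], data[:, 1:]                    # current / next timesteps
d = tf.reshape(d, [-1, X])                           # [B*T, x]
d_ = tf.reshape(d_, [-1, X])                         # [B*T, x]
print(d.shape, d_.shape)                             # (6, 3) (6, 3)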
__init__
|
Initialize a new Clients
object with the provided RestSession.
Args:
session(RestSession): The RESTful session object to be used for
API calls to the DNA Center service.
Raises:
TypeError: If the parameter types are incorrect.
|
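In practice the Clients wrapper described above is not constructed directly; the published dnacentersdk package wires the RestSession, object factory, and request validator and exposes the wrapper as api.clients. A hedged usage sketch (the controller URL and credentials are placeholders, and the DNACenterAPI entry point is assumed from the packaged SDK):
# Illustrative sketch only: placeholder credentials/URL, assumes the dnacentersdk package.
from dnacentersdk import DNACenterAPI
api = DNACenterAPI(base_url='https://dnac.example.com',   # placeholder controller URL
                   username='admin', password='***',      # placeholder credentials
                   verify=False)
health = api.clients.get_overall_client_health(timestamp='')
print(health)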
# -*- coding: utf-8 -*-
"""Cisco DNA Center Clients API wrapper.
Copyright (c) 2019-2021 Cisco Systems.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from builtins import *
from past.builtins import basestring
from ...restsession import RestSession
from ...utils import (
check_type,
dict_from_items_with_values,
apply_path_params,
dict_of_str,
)
class Clients(object):
"""Cisco DNA Center Clients API (version: 1.3.1).
Wraps the DNA Center Clients
API and exposes the API as native Python
methods that return native Python objects.
"""
# MASKED: __init__ function (lines 55-73)
def get_client_enrichment_details(self,
headers=None,
**request_parameters):
"""Enriches a given network End User context (a network user-id or
end user's device Mac Address) with details about the
user, the devices that the user is connected to and the
assurance issues that the user is impacted by.
Args:
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
list: JSON response. A list of MyDict objects.
Access the object's properties by using the dot notation
or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
if headers is not None:
if 'entity_type' in headers:
check_type(headers.get('entity_type'),
basestring, may_be_none=False)
if 'entity_value' in headers:
check_type(headers.get('entity_value'),
basestring, may_be_none=False)
if 'issueCategory' in headers:
check_type(headers.get('issueCategory'),
basestring)
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/client-enrichment-details')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_b199685d4d089a67_v1_3_1', json_data)
def get_overall_client_health(self,
timestamp=None,
headers=None,
**request_parameters):
"""Returns Overall Client Health information by Client type (Wired
        and Wireless) at any given point in time.
Args:
timestamp(basestring, int): Epoch time(in milliseconds) when the Client health data is required.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(timestamp, (basestring, int))
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'timestamp':
timestamp,
}
if _params['timestamp'] is None:
_params['timestamp'] = ''
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/client-health')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_149aa93b4ddb80dd_v1_3_1', json_data)
def get_client_detail(self,
mac_address,
timestamp=None,
headers=None,
**request_parameters):
"""Returns detailed Client information retrieved by Mac Address for
        any given point in time.
Args:
timestamp(basestring, int): Epoch time(in milliseconds) when the Client health data is required.
mac_address(basestring): MAC Address of the client.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(timestamp, (basestring, int))
check_type(mac_address, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'timestamp':
timestamp,
'macAddress':
mac_address,
}
if _params['timestamp'] is None:
_params['timestamp'] = ''
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/client-detail')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_e2adba7943bab3e9_v1_3_1', json_data)
|
def __init__(self, session, object_factory, request_validator):
"""Initialize a new Clients
object with the provided RestSession.
Args:
session(RestSession): The RESTful session object to be used for
API calls to the DNA Center service.
Raises:
TypeError: If the parameter types are incorrect.
"""
check_type(session, RestSession)
super(Clients, self).__init__()
self._session = session
self._object_factory = object_factory
self._request_validator = request_validator
| 55 | 73 |
# -*- coding: utf-8 -*-
"""Cisco DNA Center Clients API wrapper.
Copyright (c) 2019-2021 Cisco Systems.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from builtins import *
from past.builtins import basestring
from ...restsession import RestSession
from ...utils import (
check_type,
dict_from_items_with_values,
apply_path_params,
dict_of_str,
)
class Clients(object):
"""Cisco DNA Center Clients API (version: 1.3.1).
Wraps the DNA Center Clients
API and exposes the API as native Python
methods that return native Python objects.
"""
def __init__(self, session, object_factory, request_validator):
"""Initialize a new Clients
object with the provided RestSession.
Args:
session(RestSession): The RESTful session object to be used for
API calls to the DNA Center service.
Raises:
TypeError: If the parameter types are incorrect.
"""
check_type(session, RestSession)
super(Clients, self).__init__()
self._session = session
self._object_factory = object_factory
self._request_validator = request_validator
def get_client_enrichment_details(self,
headers=None,
**request_parameters):
"""Enriches a given network End User context (a network user-id or
end user's device Mac Address) with details about the
user, the devices that the user is connected to and the
assurance issues that the user is impacted by.
Args:
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
list: JSON response. A list of MyDict objects.
Access the object's properties by using the dot notation
or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
if headers is not None:
if 'entity_type' in headers:
check_type(headers.get('entity_type'),
basestring, may_be_none=False)
if 'entity_value' in headers:
check_type(headers.get('entity_value'),
basestring, may_be_none=False)
if 'issueCategory' in headers:
check_type(headers.get('issueCategory'),
basestring)
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/client-enrichment-details')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_b199685d4d089a67_v1_3_1', json_data)
def get_overall_client_health(self,
timestamp=None,
headers=None,
**request_parameters):
"""Returns Overall Client Health information by Client type (Wired
        and Wireless) at any given point in time.
Args:
timestamp(basestring, int): Epoch time(in milliseconds) when the Client health data is required.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(timestamp, (basestring, int))
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'timestamp':
timestamp,
}
if _params['timestamp'] is None:
_params['timestamp'] = ''
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/client-health')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_149aa93b4ddb80dd_v1_3_1', json_data)
def get_client_detail(self,
mac_address,
timestamp=None,
headers=None,
**request_parameters):
"""Returns detailed Client information retrieved by Mac Address for
        any given point in time.
Args:
timestamp(basestring, int): Epoch time(in milliseconds) when the Client health data is required.
mac_address(basestring): MAC Address of the client.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(timestamp, (basestring, int))
check_type(mac_address, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'timestamp':
timestamp,
'macAddress':
mac_address,
}
if _params['timestamp'] is None:
_params['timestamp'] = ''
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/client-detail')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_e2adba7943bab3e9_v1_3_1', json_data)
|
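A correspondingly hedged sketch of calling get_client_detail from the module above: the MAC address is a placeholder, the timestamp is the current epoch time in milliseconds, and the DNACenterAPI entry point is again an assumption based on the packaged SDK.
# Illustrative sketch only: placeholder MAC address, current epoch-millisecond timestamp.
import time
from dnacentersdk import DNACenterAPI
api = DNACenterAPI(base_url='https://dnac.example.com', username='admin', password='***', verify=False)
detail = api.clients.get_client_detail(
    mac_address='00:11:22:33:44:55',
    timestamp=str(int(time.time() * 1000)))
print(detail)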
get_client_enrichment_details
|
Enriches a given network End User context (a network user-id or
end user's device Mac Address) with details about the
user, the devices that the user is connected to and the
assurance issues that the user is impacted by.
Args:
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
list: JSON response. A list of MyDict objects.
Access the object's properties by using the dot notation
or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
|
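The docstring above is for get_client_enrichment_details, which takes the end-user context through custom headers rather than query parameters; the header names checked in the implementation are entity_type, entity_value, and optionally issueCategory. A hedged sketch follows; the entity values are placeholders, and 'mac_address' as the entity_type is an assumption drawn from the public API documentation.
# Illustrative sketch only: placeholder entity values, assumes the dnacentersdk package.
from dnacentersdk import DNACenterAPI
api = DNACenterAPI(base_url='https://dnac.example.com', username='admin', password='***', verify=False)
enrichment = api.clients.get_client_enrichment_details(
    headers={'entity_type': 'mac_address',             # or 'network_user_id' (assumption)
             'entity_value': '00:11:22:33:44:55'})
print(enrichment)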
# -*- coding: utf-8 -*-
"""Cisco DNA Center Clients API wrapper.
Copyright (c) 2019-2021 Cisco Systems.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from builtins import *
from past.builtins import basestring
from ...restsession import RestSession
from ...utils import (
check_type,
dict_from_items_with_values,
apply_path_params,
dict_of_str,
)
class Clients(object):
"""Cisco DNA Center Clients API (version: 1.3.1).
Wraps the DNA Center Clients
API and exposes the API as native Python
methods that return native Python objects.
"""
def __init__(self, session, object_factory, request_validator):
"""Initialize a new Clients
object with the provided RestSession.
Args:
session(RestSession): The RESTful session object to be used for
API calls to the DNA Center service.
Raises:
TypeError: If the parameter types are incorrect.
"""
check_type(session, RestSession)
super(Clients, self).__init__()
self._session = session
self._object_factory = object_factory
self._request_validator = request_validator
# MASKED: get_client_enrichment_details function (lines 75-136)
def get_overall_client_health(self,
timestamp=None,
headers=None,
**request_parameters):
"""Returns Overall Client Health information by Client type (Wired
        and Wireless) at any given point in time.
Args:
timestamp(basestring, int): Epoch time(in milliseconds) when the Client health data is required.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(timestamp, (basestring, int))
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'timestamp':
timestamp,
}
if _params['timestamp'] is None:
_params['timestamp'] = ''
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/client-health')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_149aa93b4ddb80dd_v1_3_1', json_data)
def get_client_detail(self,
mac_address,
timestamp=None,
headers=None,
**request_parameters):
"""Returns detailed Client information retrieved by Mac Address for
any given point of time. .
Args:
timestamp(basestring, int): Epoch time(in milliseconds) when the Client health data is required.
mac_address(basestring): MAC Address of the client.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(timestamp, (basestring, int))
check_type(mac_address, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'timestamp':
timestamp,
'macAddress':
mac_address,
}
if _params['timestamp'] is None:
_params['timestamp'] = ''
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/client-detail')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_e2adba7943bab3e9_v1_3_1', json_data)
|
def get_client_enrichment_details(self,
headers=None,
**request_parameters):
"""Enriches a given network End User context (a network user-id or
end user's device Mac Address) with details about the
user, the devices that the user is connected to and the
assurance issues that the user is impacted by.
Args:
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
list: JSON response. A list of MyDict objects.
Access the object's properties by using the dot notation
or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
if headers is not None:
if 'entity_type' in headers:
check_type(headers.get('entity_type'),
basestring, may_be_none=False)
if 'entity_value' in headers:
check_type(headers.get('entity_value'),
basestring, may_be_none=False)
if 'issueCategory' in headers:
check_type(headers.get('issueCategory'),
basestring)
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/client-enrichment-details')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_b199685d4d089a67_v1_3_1', json_data)
| 75 | 136 |
# -*- coding: utf-8 -*-
"""Cisco DNA Center Clients API wrapper.
Copyright (c) 2019-2021 Cisco Systems.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from builtins import *
from past.builtins import basestring
from ...restsession import RestSession
from ...utils import (
check_type,
dict_from_items_with_values,
apply_path_params,
dict_of_str,
)
class Clients(object):
"""Cisco DNA Center Clients API (version: 1.3.1).
Wraps the DNA Center Clients
API and exposes the API as native Python
methods that return native Python objects.
"""
def __init__(self, session, object_factory, request_validator):
"""Initialize a new Clients
object with the provided RestSession.
Args:
session(RestSession): The RESTful session object to be used for
API calls to the DNA Center service.
Raises:
TypeError: If the parameter types are incorrect.
"""
check_type(session, RestSession)
super(Clients, self).__init__()
self._session = session
self._object_factory = object_factory
self._request_validator = request_validator
def get_client_enrichment_details(self,
headers=None,
**request_parameters):
"""Enriches a given network End User context (a network user-id or
end user's device Mac Address) with details about the
user, the devices that the user is connected to and the
assurance issues that the user is impacted by.
Args:
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
list: JSON response. A list of MyDict objects.
Access the object's properties by using the dot notation
or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
if headers is not None:
if 'entity_type' in headers:
check_type(headers.get('entity_type'),
basestring, may_be_none=False)
if 'entity_value' in headers:
check_type(headers.get('entity_value'),
basestring, may_be_none=False)
if 'issueCategory' in headers:
check_type(headers.get('issueCategory'),
basestring)
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/client-enrichment-details')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_b199685d4d089a67_v1_3_1', json_data)
def get_overall_client_health(self,
timestamp=None,
headers=None,
**request_parameters):
"""Returns Overall Client Health information by Client type (Wired
and Wireless) for any given point of time.
Args:
timestamp(basestring, int): Epoch time(in milliseconds) when the Client health data is required.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(timestamp, (basestring, int))
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'timestamp':
timestamp,
}
if _params['timestamp'] is None:
_params['timestamp'] = ''
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/client-health')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_149aa93b4ddb80dd_v1_3_1', json_data)
def get_client_detail(self,
mac_address,
timestamp=None,
headers=None,
**request_parameters):
"""Returns detailed Client information retrieved by Mac Address for
any given point of time. .
Args:
timestamp(basestring, int): Epoch time(in milliseconds) when the Client health data is required.
mac_address(basestring): MAC Address of the client.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(timestamp, (basestring, int))
check_type(mac_address, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'timestamp':
timestamp,
'macAddress':
mac_address,
}
if _params['timestamp'] is None:
_params['timestamp'] = ''
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/client-detail')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_e2adba7943bab3e9_v1_3_1', json_data)
|
get_overall_client_health
|
Returns Overall Client Health information by Client type (Wired
            and Wireless) for any given point in time.
        Args:
            timestamp(basestring, int): Epoch time (in milliseconds) when the Client health data is required.
            headers(dict): Dictionary of HTTP Headers to send with the Request.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
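
A minimal usage sketch, assuming a Clients instance named `clients` has already been created from an authenticated session:

import time

# Epoch time in milliseconds; timestamp is optional and the wrapper
# normalizes None to an empty value before sending the request.
ts = int(time.time() * 1000)
health = clients.get_overall_client_health(timestamp=ts)
print(health)   # MyDict: health.someField or health['someField']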
|
# -*- coding: utf-8 -*-
"""Cisco DNA Center Clients API wrapper.
Copyright (c) 2019-2021 Cisco Systems.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from builtins import *
from past.builtins import basestring
from ...restsession import RestSession
from ...utils import (
check_type,
dict_from_items_with_values,
apply_path_params,
dict_of_str,
)
class Clients(object):
"""Cisco DNA Center Clients API (version: 1.3.1).
Wraps the DNA Center Clients
API and exposes the API as native Python
methods that return native Python objects.
"""
def __init__(self, session, object_factory, request_validator):
"""Initialize a new Clients
object with the provided RestSession.
Args:
session(RestSession): The RESTful session object to be used for
API calls to the DNA Center service.
Raises:
TypeError: If the parameter types are incorrect.
"""
check_type(session, RestSession)
super(Clients, self).__init__()
self._session = session
self._object_factory = object_factory
self._request_validator = request_validator
def get_client_enrichment_details(self,
headers=None,
**request_parameters):
"""Enriches a given network End User context (a network user-id or
end user's device Mac Address) with details about the
user, the devices that the user is connected to and the
assurance issues that the user is impacted by.
Args:
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
list: JSON response. A list of MyDict objects.
Access the object's properties by using the dot notation
or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
if headers is not None:
if 'entity_type' in headers:
check_type(headers.get('entity_type'),
basestring, may_be_none=False)
if 'entity_value' in headers:
check_type(headers.get('entity_value'),
basestring, may_be_none=False)
if 'issueCategory' in headers:
check_type(headers.get('issueCategory'),
basestring)
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/client-enrichment-details')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_b199685d4d089a67_v1_3_1', json_data)
# MASKED: get_overall_client_health function (lines 138-196)
def get_client_detail(self,
mac_address,
timestamp=None,
headers=None,
**request_parameters):
"""Returns detailed Client information retrieved by Mac Address for
any given point of time. .
Args:
timestamp(basestring, int): Epoch time(in milliseconds) when the Client health data is required.
mac_address(basestring): MAC Address of the client.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(timestamp, (basestring, int))
check_type(mac_address, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'timestamp':
timestamp,
'macAddress':
mac_address,
}
if _params['timestamp'] is None:
_params['timestamp'] = ''
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/client-detail')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_e2adba7943bab3e9_v1_3_1', json_data)
|
def get_overall_client_health(self,
timestamp=None,
headers=None,
**request_parameters):
"""Returns Overall Client Health information by Client type (Wired
and Wireless) for any given point of time.
Args:
timestamp(basestring, int): Epoch time(in milliseconds) when the Client health data is required.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(timestamp, (basestring, int))
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'timestamp':
timestamp,
}
if _params['timestamp'] is None:
_params['timestamp'] = ''
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/client-health')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_149aa93b4ddb80dd_v1_3_1', json_data)
| 138 | 196 |
# -*- coding: utf-8 -*-
"""Cisco DNA Center Clients API wrapper.
Copyright (c) 2019-2021 Cisco Systems.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from builtins import *
from past.builtins import basestring
from ...restsession import RestSession
from ...utils import (
check_type,
dict_from_items_with_values,
apply_path_params,
dict_of_str,
)
class Clients(object):
"""Cisco DNA Center Clients API (version: 1.3.1).
Wraps the DNA Center Clients
API and exposes the API as native Python
methods that return native Python objects.
"""
def __init__(self, session, object_factory, request_validator):
"""Initialize a new Clients
object with the provided RestSession.
Args:
session(RestSession): The RESTful session object to be used for
API calls to the DNA Center service.
Raises:
TypeError: If the parameter types are incorrect.
"""
check_type(session, RestSession)
super(Clients, self).__init__()
self._session = session
self._object_factory = object_factory
self._request_validator = request_validator
def get_client_enrichment_details(self,
headers=None,
**request_parameters):
"""Enriches a given network End User context (a network user-id or
end user's device Mac Address) with details about the
user, the devices that the user is connected to and the
assurance issues that the user is impacted by.
Args:
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
list: JSON response. A list of MyDict objects.
Access the object's properties by using the dot notation
or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
if headers is not None:
if 'entity_type' in headers:
check_type(headers.get('entity_type'),
basestring, may_be_none=False)
if 'entity_value' in headers:
check_type(headers.get('entity_value'),
basestring, may_be_none=False)
if 'issueCategory' in headers:
check_type(headers.get('issueCategory'),
basestring)
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/client-enrichment-details')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_b199685d4d089a67_v1_3_1', json_data)
def get_overall_client_health(self,
timestamp=None,
headers=None,
**request_parameters):
"""Returns Overall Client Health information by Client type (Wired
and Wireless) for any given point of time.
Args:
timestamp(basestring, int): Epoch time(in milliseconds) when the Client health data is required.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(timestamp, (basestring, int))
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'timestamp':
timestamp,
}
if _params['timestamp'] is None:
_params['timestamp'] = ''
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/client-health')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_149aa93b4ddb80dd_v1_3_1', json_data)
def get_client_detail(self,
mac_address,
timestamp=None,
headers=None,
**request_parameters):
"""Returns detailed Client information retrieved by Mac Address for
any given point of time. .
Args:
timestamp(basestring, int): Epoch time(in milliseconds) when the Client health data is required.
mac_address(basestring): MAC Address of the client.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(timestamp, (basestring, int))
check_type(mac_address, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'timestamp':
timestamp,
'macAddress':
mac_address,
}
if _params['timestamp'] is None:
_params['timestamp'] = ''
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/client-detail')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_e2adba7943bab3e9_v1_3_1', json_data)
|
get_client_detail
|
Returns detailed Client information retrieved by MAC Address for
            any given point in time.
        Args:
            timestamp(basestring, int): Epoch time (in milliseconds) when the client detail data is required.
            mac_address(basestring): MAC Address of the client.
            headers(dict): Dictionary of HTTP Headers to send with the Request.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
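
A minimal usage sketch, assuming the same illustrative `clients` instance; the MAC address and timestamp below are placeholders:

detail = clients.get_client_detail(
    mac_address='00:1A:2B:3C:4D:5E',   # placeholder client MAC address
    timestamp=1609459200000,           # optional epoch time in milliseconds
)
print(detail)                          # MyDict: dot or bracket access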
|
# -*- coding: utf-8 -*-
"""Cisco DNA Center Clients API wrapper.
Copyright (c) 2019-2021 Cisco Systems.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from builtins import *
from past.builtins import basestring
from ...restsession import RestSession
from ...utils import (
check_type,
dict_from_items_with_values,
apply_path_params,
dict_of_str,
)
class Clients(object):
"""Cisco DNA Center Clients API (version: 1.3.1).
Wraps the DNA Center Clients
API and exposes the API as native Python
methods that return native Python objects.
"""
def __init__(self, session, object_factory, request_validator):
"""Initialize a new Clients
object with the provided RestSession.
Args:
session(RestSession): The RESTful session object to be used for
API calls to the DNA Center service.
Raises:
TypeError: If the parameter types are incorrect.
"""
check_type(session, RestSession)
super(Clients, self).__init__()
self._session = session
self._object_factory = object_factory
self._request_validator = request_validator
def get_client_enrichment_details(self,
headers=None,
**request_parameters):
"""Enriches a given network End User context (a network user-id or
end user's device Mac Address) with details about the
user, the devices that the user is connected to and the
assurance issues that the user is impacted by.
Args:
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
list: JSON response. A list of MyDict objects.
Access the object's properties by using the dot notation
or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
if headers is not None:
if 'entity_type' in headers:
check_type(headers.get('entity_type'),
basestring, may_be_none=False)
if 'entity_value' in headers:
check_type(headers.get('entity_value'),
basestring, may_be_none=False)
if 'issueCategory' in headers:
check_type(headers.get('issueCategory'),
basestring)
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/client-enrichment-details')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_b199685d4d089a67_v1_3_1', json_data)
def get_overall_client_health(self,
timestamp=None,
headers=None,
**request_parameters):
"""Returns Overall Client Health information by Client type (Wired
and Wireless) for any given point of time.
Args:
timestamp(basestring, int): Epoch time(in milliseconds) when the Client health data is required.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(timestamp, (basestring, int))
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'timestamp':
timestamp,
}
if _params['timestamp'] is None:
_params['timestamp'] = ''
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/client-health')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_149aa93b4ddb80dd_v1_3_1', json_data)
# MASKED: get_client_detail function (lines 198-262)
|
def get_client_detail(self,
mac_address,
timestamp=None,
headers=None,
**request_parameters):
"""Returns detailed Client information retrieved by Mac Address for
any given point of time. .
Args:
timestamp(basestring, int): Epoch time(in milliseconds) when the Client health data is required.
mac_address(basestring): MAC Address of the client.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(timestamp, (basestring, int))
check_type(mac_address, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'timestamp':
timestamp,
'macAddress':
mac_address,
}
if _params['timestamp'] is None:
_params['timestamp'] = ''
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/client-detail')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_e2adba7943bab3e9_v1_3_1', json_data)
| 198 | 262 |
# -*- coding: utf-8 -*-
"""Cisco DNA Center Clients API wrapper.
Copyright (c) 2019-2021 Cisco Systems.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from builtins import *
from past.builtins import basestring
from ...restsession import RestSession
from ...utils import (
check_type,
dict_from_items_with_values,
apply_path_params,
dict_of_str,
)
class Clients(object):
"""Cisco DNA Center Clients API (version: 1.3.1).
Wraps the DNA Center Clients
API and exposes the API as native Python
methods that return native Python objects.
"""
def __init__(self, session, object_factory, request_validator):
"""Initialize a new Clients
object with the provided RestSession.
Args:
session(RestSession): The RESTful session object to be used for
API calls to the DNA Center service.
Raises:
TypeError: If the parameter types are incorrect.
"""
check_type(session, RestSession)
super(Clients, self).__init__()
self._session = session
self._object_factory = object_factory
self._request_validator = request_validator
def get_client_enrichment_details(self,
headers=None,
**request_parameters):
"""Enriches a given network End User context (a network user-id or
end user's device Mac Address) with details about the
user, the devices that the user is connected to and the
assurance issues that the user is impacted by.
Args:
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
list: JSON response. A list of MyDict objects.
Access the object's properties by using the dot notation
or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
if headers is not None:
if 'entity_type' in headers:
check_type(headers.get('entity_type'),
basestring, may_be_none=False)
if 'entity_value' in headers:
check_type(headers.get('entity_value'),
basestring, may_be_none=False)
if 'issueCategory' in headers:
check_type(headers.get('issueCategory'),
basestring)
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
}
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/client-enrichment-details')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_b199685d4d089a67_v1_3_1', json_data)
def get_overall_client_health(self,
timestamp=None,
headers=None,
**request_parameters):
"""Returns Overall Client Health information by Client type (Wired
and Wireless) for any given point of time.
Args:
timestamp(basestring, int): Epoch time(in milliseconds) when the Client health data is required.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(timestamp, (basestring, int))
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'timestamp':
timestamp,
}
if _params['timestamp'] is None:
_params['timestamp'] = ''
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/client-health')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_149aa93b4ddb80dd_v1_3_1', json_data)
def get_client_detail(self,
mac_address,
timestamp=None,
headers=None,
**request_parameters):
"""Returns detailed Client information retrieved by Mac Address for
any given point of time. .
Args:
timestamp(basestring, int): Epoch time(in milliseconds) when the Client health data is required.
mac_address(basestring): MAC Address of the client.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(timestamp, (basestring, int))
check_type(mac_address, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
_params = {
'timestamp':
timestamp,
'macAddress':
mac_address,
}
if _params['timestamp'] is None:
_params['timestamp'] = ''
_params.update(request_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/client-detail')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_e2adba7943bab3e9_v1_3_1', json_data)
|
download
|
Downloads the file.
:param directory: the directory to download the file to. This must exist.
:return: the path to the downloaded file.
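
A minimal usage sketch, assuming `f` is a File instance obtained through an authenticated session and that the target directory already exists:

path = f.download('/tmp')
print('downloaded to', path)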
|
import json
import os
import re
from yandeley.models.annotations import Annotation
from yandeley.response import SessionResponseObject
class File(SessionResponseObject):
"""
A file attached to a document.
.. attribute:: id
.. attribute:: size
.. attribute:: file_name
.. attribute:: mime_type
.. attribute:: filehash
.. attribute:: download_url
"""
content_type = 'application/vnd.mendeley-file.1+json'
filename_regex = re.compile('filename="(\S+)"')
@property
def download_url(self):
"""
the URL at which the file can be downloaded. This is only valid for a short time, so should not be cached.
"""
file_url = '/files/%s' % self.id
rsp = self.session.get(file_url, allow_redirects=False)
return rsp.headers['location']
def document(self, view=None):
"""
:param view: document view to return.
:return: a :class:`UserDocument <yandeley.models.documents.UserDocument>` or
:class:`CatalogDocument <yandeley.models.catalog.CatalogDocument>`, depending on which the document is
attached to.
"""
if 'document_id' in self.json:
return self.session.documents.get_lazy(self.json['document_id'], view=view)
elif 'catalog_id' in self.json:
return self.session.catalog.get_lazy(self.json['catalog_id'], view=view)
else:
return None
# MASKED: download function (lines 46-64)
def delete(self):
"""
Deletes the file.
"""
self.session.delete('/files/%s' % self.id)
def add_sticky_note(self, text, x_position, y_position, page_number):
"""
Adds a sticky note to this file.
:param text: the text of the sticky_note.
:param x_position: the x position on the file of the sticky_note.
:param y_position: the y position on the file of the stick_note.
:param page_number: the page_number on the file of the sticky_note.
:return: a :class:`Annotation <yandeley.models.annotations.Annotation>`.
"""
position = {'x': x_position, 'y': y_position}
bounding_box = {'top_left': position, 'bottom_right': position, 'page': page_number}
annotation = {
'document_id': self.document().id,
'text': text,
'filehash': self.filehash,
'positions': [bounding_box]
}
rsp = self.session.post('/annotations/', data=json.dumps(annotation), headers={
'Accept': Annotation.content_type,
'Content-Type': Annotation.content_type
})
return Annotation(self.session, rsp.json())
def add_highlight(self, bounding_boxes, color):
"""
Adds a highlight to this file.
:param bounding_boxes: the area the highlight covers on the file.
:param color: the color of the highlight.
:return: a :class:`Annotation <yandeley.models.annotations.Annotation>`.
"""
annotation = {
'document_id': self.document().id,
'filehash': self.filehash,
'positions': [box.json for box in bounding_boxes],
'color': color.json
}
rsp = self.session.post('/annotations/', data=json.dumps(annotation), headers={
'Accept': Annotation.content_type,
'Content-Type': Annotation.content_type
})
return Annotation(self.session, rsp.json())
@classmethod
def fields(cls):
return ['id', 'size', 'file_name', 'mime_type', 'filehash']
|
def download(self, directory):
"""
Downloads the file.
:param directory: the directory to download the file to. This must exist.
:return: the path to the downloaded file.
"""
rsp = self.session.get('/files/%s' % self.id, stream=True)
filename = self.filename_regex.search(rsp.headers['content-disposition']).group(1)
path = os.path.join(directory, filename)
with open(path, 'wb') as f:
for block in rsp.iter_content(1024):
if not block:
break
f.write(block)
return path
| 46 | 64 |
import json
import os
import re
from yandeley.models.annotations import Annotation
from yandeley.response import SessionResponseObject
class File(SessionResponseObject):
"""
A file attached to a document.
.. attribute:: id
.. attribute:: size
.. attribute:: file_name
.. attribute:: mime_type
.. attribute:: filehash
.. attribute:: download_url
"""
content_type = 'application/vnd.mendeley-file.1+json'
    filename_regex = re.compile(r'filename="(\S+)"')
@property
def download_url(self):
"""
the URL at which the file can be downloaded. This is only valid for a short time, so should not be cached.
"""
file_url = '/files/%s' % self.id
rsp = self.session.get(file_url, allow_redirects=False)
return rsp.headers['location']
def document(self, view=None):
"""
:param view: document view to return.
:return: a :class:`UserDocument <yandeley.models.documents.UserDocument>` or
:class:`CatalogDocument <yandeley.models.catalog.CatalogDocument>`, depending on which the document is
attached to.
"""
if 'document_id' in self.json:
return self.session.documents.get_lazy(self.json['document_id'], view=view)
elif 'catalog_id' in self.json:
return self.session.catalog.get_lazy(self.json['catalog_id'], view=view)
else:
return None
def download(self, directory):
"""
Downloads the file.
:param directory: the directory to download the file to. This must exist.
:return: the path to the downloaded file.
"""
rsp = self.session.get('/files/%s' % self.id, stream=True)
filename = self.filename_regex.search(rsp.headers['content-disposition']).group(1)
path = os.path.join(directory, filename)
with open(path, 'wb') as f:
for block in rsp.iter_content(1024):
if not block:
break
f.write(block)
return path
def delete(self):
"""
Deletes the file.
"""
self.session.delete('/files/%s' % self.id)
def add_sticky_note(self, text, x_position, y_position, page_number):
"""
Adds a sticky note to this file.
:param text: the text of the sticky_note.
:param x_position: the x position on the file of the sticky_note.
        :param y_position: the y position on the file of the sticky_note.
:param page_number: the page_number on the file of the sticky_note.
:return: a :class:`Annotation <yandeley.models.annotations.Annotation>`.
"""
position = {'x': x_position, 'y': y_position}
bounding_box = {'top_left': position, 'bottom_right': position, 'page': page_number}
annotation = {
'document_id': self.document().id,
'text': text,
'filehash': self.filehash,
'positions': [bounding_box]
}
rsp = self.session.post('/annotations/', data=json.dumps(annotation), headers={
'Accept': Annotation.content_type,
'Content-Type': Annotation.content_type
})
return Annotation(self.session, rsp.json())
def add_highlight(self, bounding_boxes, color):
"""
Adds a highlight to this file.
:param bounding_boxes: the area the highlight covers on the file.
:param color: the color of the highlight.
:return: a :class:`Annotation <yandeley.models.annotations.Annotation>`.
"""
annotation = {
'document_id': self.document().id,
'filehash': self.filehash,
'positions': [box.json for box in bounding_boxes],
'color': color.json
}
rsp = self.session.post('/annotations/', data=json.dumps(annotation), headers={
'Accept': Annotation.content_type,
'Content-Type': Annotation.content_type
})
return Annotation(self.session, rsp.json())
@classmethod
def fields(cls):
return ['id', 'size', 'file_name', 'mime_type', 'filehash']
|
add_sticky_note
|
Adds a sticky note to this file.
:param text: the text of the sticky_note.
:param x_position: the x position on the file of the sticky_note.
        :param y_position: the y position on the file of the sticky_note.
:param page_number: the page_number on the file of the sticky_note.
:return: a :class:`Annotation <yandeley.models.annotations.Annotation>`.
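
A minimal usage sketch, assuming `f` is a File attached to a document; the coordinates are illustrative page positions:

note = f.add_sticky_note(
    text='Check this paragraph',
    x_position=100,
    y_position=200,
    page_number=1,
)
# `note` is the Annotation object built from the API response.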
|
import json
import os
import re
from yandeley.models.annotations import Annotation
from yandeley.response import SessionResponseObject
class File(SessionResponseObject):
"""
A file attached to a document.
.. attribute:: id
.. attribute:: size
.. attribute:: file_name
.. attribute:: mime_type
.. attribute:: filehash
.. attribute:: download_url
"""
content_type = 'application/vnd.mendeley-file.1+json'
    filename_regex = re.compile(r'filename="(\S+)"')
@property
def download_url(self):
"""
the URL at which the file can be downloaded. This is only valid for a short time, so should not be cached.
"""
file_url = '/files/%s' % self.id
rsp = self.session.get(file_url, allow_redirects=False)
return rsp.headers['location']
def document(self, view=None):
"""
:param view: document view to return.
:return: a :class:`UserDocument <yandeley.models.documents.UserDocument>` or
:class:`CatalogDocument <yandeley.models.catalog.CatalogDocument>`, depending on which the document is
attached to.
"""
if 'document_id' in self.json:
return self.session.documents.get_lazy(self.json['document_id'], view=view)
elif 'catalog_id' in self.json:
return self.session.catalog.get_lazy(self.json['catalog_id'], view=view)
else:
return None
def download(self, directory):
"""
Downloads the file.
:param directory: the directory to download the file to. This must exist.
:return: the path to the downloaded file.
"""
rsp = self.session.get('/files/%s' % self.id, stream=True)
filename = self.filename_regex.search(rsp.headers['content-disposition']).group(1)
path = os.path.join(directory, filename)
with open(path, 'wb') as f:
for block in rsp.iter_content(1024):
if not block:
break
f.write(block)
return path
def delete(self):
"""
Deletes the file.
"""
self.session.delete('/files/%s' % self.id)
# MASKED: add_sticky_note function (lines 72-96)
def add_highlight(self, bounding_boxes, color):
"""
Adds a highlight to this file.
:param bounding_boxes: the area the highlight covers on the file.
:param color: the color of the highlight.
:return: a :class:`Annotation <yandeley.models.annotations.Annotation>`.
"""
annotation = {
'document_id': self.document().id,
'filehash': self.filehash,
'positions': [box.json for box in bounding_boxes],
'color': color.json
}
rsp = self.session.post('/annotations/', data=json.dumps(annotation), headers={
'Accept': Annotation.content_type,
'Content-Type': Annotation.content_type
})
return Annotation(self.session, rsp.json())
@classmethod
def fields(cls):
return ['id', 'size', 'file_name', 'mime_type', 'filehash']
|
def add_sticky_note(self, text, x_position, y_position, page_number):
"""
Adds a sticky note to this file.
:param text: the text of the sticky_note.
:param x_position: the x position on the file of the sticky_note.
        :param y_position: the y position on the file of the sticky_note.
:param page_number: the page_number on the file of the sticky_note.
:return: a :class:`Annotation <yandeley.models.annotations.Annotation>`.
"""
position = {'x': x_position, 'y': y_position}
bounding_box = {'top_left': position, 'bottom_right': position, 'page': page_number}
annotation = {
'document_id': self.document().id,
'text': text,
'filehash': self.filehash,
'positions': [bounding_box]
}
rsp = self.session.post('/annotations/', data=json.dumps(annotation), headers={
'Accept': Annotation.content_type,
'Content-Type': Annotation.content_type
})
return Annotation(self.session, rsp.json())
| 72 | 96 |
import json
import os
import re
from yandeley.models.annotations import Annotation
from yandeley.response import SessionResponseObject
class File(SessionResponseObject):
"""
A file attached to a document.
.. attribute:: id
.. attribute:: size
.. attribute:: file_name
.. attribute:: mime_type
.. attribute:: filehash
.. attribute:: download_url
"""
content_type = 'application/vnd.mendeley-file.1+json'
    filename_regex = re.compile(r'filename="(\S+)"')
@property
def download_url(self):
"""
the URL at which the file can be downloaded. This is only valid for a short time, so should not be cached.
"""
file_url = '/files/%s' % self.id
rsp = self.session.get(file_url, allow_redirects=False)
return rsp.headers['location']
def document(self, view=None):
"""
:param view: document view to return.
:return: a :class:`UserDocument <yandeley.models.documents.UserDocument>` or
:class:`CatalogDocument <yandeley.models.catalog.CatalogDocument>`, depending on which the document is
attached to.
"""
if 'document_id' in self.json:
return self.session.documents.get_lazy(self.json['document_id'], view=view)
elif 'catalog_id' in self.json:
return self.session.catalog.get_lazy(self.json['catalog_id'], view=view)
else:
return None
def download(self, directory):
"""
Downloads the file.
:param directory: the directory to download the file to. This must exist.
:return: the path to the downloaded file.
"""
rsp = self.session.get('/files/%s' % self.id, stream=True)
filename = self.filename_regex.search(rsp.headers['content-disposition']).group(1)
path = os.path.join(directory, filename)
with open(path, 'wb') as f:
for block in rsp.iter_content(1024):
if not block:
break
f.write(block)
return path
def delete(self):
"""
Deletes the file.
"""
self.session.delete('/files/%s' % self.id)
def add_sticky_note(self, text, x_position, y_position, page_number):
"""
Adds a sticky note to this file.
:param text: the text of the sticky_note.
:param x_position: the x position on the file of the sticky_note.
        :param y_position: the y position on the file of the sticky_note.
:param page_number: the page_number on the file of the sticky_note.
:return: a :class:`Annotation <yandeley.models.annotations.Annotation>`.
"""
position = {'x': x_position, 'y': y_position}
bounding_box = {'top_left': position, 'bottom_right': position, 'page': page_number}
annotation = {
'document_id': self.document().id,
'text': text,
'filehash': self.filehash,
'positions': [bounding_box]
}
rsp = self.session.post('/annotations/', data=json.dumps(annotation), headers={
'Accept': Annotation.content_type,
'Content-Type': Annotation.content_type
})
return Annotation(self.session, rsp.json())
def add_highlight(self, bounding_boxes, color):
"""
Adds a highlight to this file.
:param bounding_boxes: the area the highlight covers on the file.
:param color: the color of the highlight.
:return: an :class:`Annotation <yandeley.models.annotations.Annotation>`.
"""
annotation = {
'document_id': self.document().id,
'filehash': self.filehash,
'positions': [box.json for box in bounding_boxes],
'color': color.json
}
rsp = self.session.post('/annotations/', data=json.dumps(annotation), headers={
'Accept': Annotation.content_type,
'Content-Type': Annotation.content_type
})
return Annotation(self.session, rsp.json())
@classmethod
def fields(cls):
return ['id', 'size', 'file_name', 'mime_type', 'filehash']
|
add_highlight
|
Adds a highlight to this file.
:param bounding_boxes: the area the highlight covers on the file.
:param color: the color of the highlight.
:return: an :class:`Annotation <yandeley.models.annotations.Annotation>`.
|
import json
import os
import re
from yandeley.models.annotations import Annotation
from yandeley.response import SessionResponseObject
class File(SessionResponseObject):
"""
A file attached to a document.
.. attribute:: id
.. attribute:: size
.. attribute:: file_name
.. attribute:: mime_type
.. attribute:: filehash
.. attribute:: download_url
"""
content_type = 'application/vnd.mendeley-file.1+json'
filename_regex = re.compile(r'filename="(\S+)"')
@property
def download_url(self):
"""
the URL at which the file can be downloaded. This is only valid for a short time, so should not be cached.
"""
file_url = '/files/%s' % self.id
rsp = self.session.get(file_url, allow_redirects=False)
return rsp.headers['location']
def document(self, view=None):
"""
:param view: document view to return.
:return: a :class:`UserDocument <yandeley.models.documents.UserDocument>` or
:class:`CatalogDocument <yandeley.models.catalog.CatalogDocument>`, depending on which the document is
attached to.
"""
if 'document_id' in self.json:
return self.session.documents.get_lazy(self.json['document_id'], view=view)
elif 'catalog_id' in self.json:
return self.session.catalog.get_lazy(self.json['catalog_id'], view=view)
else:
return None
def download(self, directory):
"""
Downloads the file.
:param directory: the directory to download the file to. This must exist.
:return: the path to the downloaded file.
"""
rsp = self.session.get('/files/%s' % self.id, stream=True)
filename = self.filename_regex.search(rsp.headers['content-disposition']).group(1)
path = os.path.join(directory, filename)
with open(path, 'wb') as f:
for block in rsp.iter_content(1024):
if not block:
break
f.write(block)
return path
def delete(self):
"""
Deletes the file.
"""
self.session.delete('/files/%s' % self.id)
def add_sticky_note(self, text, x_position, y_position, page_number):
"""
Adds a sticky note to this file.
:param text: the text of the sticky_note.
:param x_position: the x position on the file of the sticky_note.
:param y_position: the y position on the file of the sticky_note.
:param page_number: the page_number on the file of the sticky_note.
:return: an :class:`Annotation <yandeley.models.annotations.Annotation>`.
"""
position = {'x': x_position, 'y': y_position}
bounding_box = {'top_left': position, 'bottom_right': position, 'page': page_number}
annotation = {
'document_id': self.document().id,
'text': text,
'filehash': self.filehash,
'positions': [bounding_box]
}
rsp = self.session.post('/annotations/', data=json.dumps(annotation), headers={
'Accept': Annotation.content_type,
'Content-Type': Annotation.content_type
})
return Annotation(self.session, rsp.json())
# MASKED: add_highlight function (lines 98-118)
@classmethod
def fields(cls):
return ['id', 'size', 'file_name', 'mime_type', 'filehash']
|
def add_highlight(self, bounding_boxes, color):
"""
Adds a highlight to this file.
:param bounding_boxes: the area the highlight covers on the file.
:param color: the color of the highlight.
:return: an :class:`Annotation <yandeley.models.annotations.Annotation>`.
"""
annotation = {
'document_id': self.document().id,
'filehash': self.filehash,
'positions': [box.json for box in bounding_boxes],
'color': color.json
}
rsp = self.session.post('/annotations/', data=json.dumps(annotation), headers={
'Accept': Annotation.content_type,
'Content-Type': Annotation.content_type
})
return Annotation(self.session, rsp.json())
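A sketch of add_highlight under the same assumptions. The implementation only needs each bounding box and the color to expose a `.json` payload, so lightweight stand-in classes (hypothetical, not part of yandeley) are used here for illustration.
# Hypothetical stand-ins exposing the `.json` dicts that add_highlight serializes.
class Box:
    def __init__(self, top_left, bottom_right, page):
        self.json = {'top_left': top_left, 'bottom_right': bottom_right, 'page': page}
class Color:
    def __init__(self, r, g, b):
        self.json = {'r': r, 'g': g, 'b': b}
box = Box({'x': 50, 'y': 100}, {'x': 250, 'y': 118}, page=1)
highlight = f.add_highlight([box], Color(255, 245, 0))  # `f` is a File instance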
| 98 | 118 |
import json
import os
import re
from yandeley.models.annotations import Annotation
from yandeley.response import SessionResponseObject
class File(SessionResponseObject):
"""
A file attached to a document.
.. attribute:: id
.. attribute:: size
.. attribute:: file_name
.. attribute:: mime_type
.. attribute:: filehash
.. attribute:: download_url
"""
content_type = 'application/vnd.mendeley-file.1+json'
filename_regex = re.compile(r'filename="(\S+)"')
@property
def download_url(self):
"""
the URL at which the file can be downloaded. This is only valid for a short time, so should not be cached.
"""
file_url = '/files/%s' % self.id
rsp = self.session.get(file_url, allow_redirects=False)
return rsp.headers['location']
def document(self, view=None):
"""
:param view: document view to return.
:return: a :class:`UserDocument <yandeley.models.documents.UserDocument>` or
:class:`CatalogDocument <yandeley.models.catalog.CatalogDocument>`, depending on which the document is
attached to.
"""
if 'document_id' in self.json:
return self.session.documents.get_lazy(self.json['document_id'], view=view)
elif 'catalog_id' in self.json:
return self.session.catalog.get_lazy(self.json['catalog_id'], view=view)
else:
return None
def download(self, directory):
"""
Downloads the file.
:param directory: the directory to download the file to. This must exist.
:return: the path to the downloaded file.
"""
rsp = self.session.get('/files/%s' % self.id, stream=True)
filename = self.filename_regex.search(rsp.headers['content-disposition']).group(1)
path = os.path.join(directory, filename)
with open(path, 'wb') as f:
for block in rsp.iter_content(1024):
if not block:
break
f.write(block)
return path
def delete(self):
"""
Deletes the file.
"""
self.session.delete('/files/%s' % self.id)
def add_sticky_note(self, text, x_position, y_position, page_number):
"""
Adds a sticky note to this file.
:param text: the text of the sticky_note.
:param x_position: the x position on the file of the sticky_note.
:param y_position: the y position on the file of the sticky_note.
:param page_number: the page_number on the file of the sticky_note.
:return: an :class:`Annotation <yandeley.models.annotations.Annotation>`.
"""
position = {'x': x_position, 'y': y_position}
bounding_box = {'top_left': position, 'bottom_right': position, 'page': page_number}
annotation = {
'document_id': self.document().id,
'text': text,
'filehash': self.filehash,
'positions': [bounding_box]
}
rsp = self.session.post('/annotations/', data=json.dumps(annotation), headers={
'Accept': Annotation.content_type,
'Content-Type': Annotation.content_type
})
return Annotation(self.session, rsp.json())
def add_highlight(self, bounding_boxes, color):
"""
Adds a highlight to this file.
:param bounding_boxes: the area the highlight covers on the file.
:param color: the color of the highlight.
:return: an :class:`Annotation <yandeley.models.annotations.Annotation>`.
"""
annotation = {
'document_id': self.document().id,
'filehash': self.filehash,
'positions': [box.json for box in bounding_boxes],
'color': color.json
}
rsp = self.session.post('/annotations/', data=json.dumps(annotation), headers={
'Accept': Annotation.content_type,
'Content-Type': Annotation.content_type
})
return Annotation(self.session, rsp.json())
@classmethod
def fields(cls):
return ['id', 'size', 'file_name', 'mime_type', 'filehash']
|
__init__
|
Args:
year (int): If provided, NUTS regions for this year will be used (if available)
scale (int): If provided, NUTS regions at this resolution will be used (if available)
|
"""
nuts_finder
-----------
You give it a point, it tells you all the EU NUTS regions
"""
import geojson
import requests
import re
from io import BytesIO
from zipfile import ZipFile
from shapely import geometry
from functools import lru_cache
import logging
YEAR_REGEX = "NUTS ([0-9]+)"
SCALE_REGEX = "1:([0-9]+) Million"
TOP_URL = "https://ec.europa.eu/eurostat/cache/" "GISCO/distribution/v2/nuts/download"
ZIP_URL = f"{TOP_URL}/" "ref-nuts-{year}-{scale}m.geojson.zip"
NESTED_FILE = "NUTS_RG_{scale}M_{year}_4326.geojson"
def _middle(values):
"""Lower bound of median, without using numpy (heavy reqs)"""
n = len(values)
is_odd = n % 2
middle_idx = int((n + is_odd) / 2) - 1
return sorted(values)[middle_idx]
def _setattr(obj, value, value_name, regex, selector):
"""Either apply setattr on `obj` with value `value`, if `value` is not None, otherwise
select a `value` from the available range of allowed values, selected by a custom `selector`
function.
Args:
obj: An object on which to run setattr
value: A value which if not None will be set as an attribute of object
value_name (str): The name of the new attribute
regex (str): regex string by which to find allowed values on the NUTS website.
selector (function): Function which takes an iterable and selects a value.
"""
allowed_values = _get_available(regex)
if value is None:
value = selector(allowed_values)
if value not in allowed_values:
raise ValueError(f"'{value_name}' must be one of {allowed_values}")
setattr(obj, value_name, value)
@lru_cache()
def _get_available(regex):
"""Use the provided regex to find allowed values on the NUTS website."""
r = requests.get(TOP_URL, verify=True)
values = set(int(yr) for yr in re.findall(regex, r.text))
return values
class NutsFinder:
"""
Object for holding onto NUTS data and exposing it to the user, also
providing a lat, lon lookup
"""
# MASKED: __init__ function (lines 65-75)
def _get_shapes(self):
"""Load the shape files for the given year and scale"""
scale = str(self.scale).zfill(2)
filename = NESTED_FILE.format(year=self.year, scale=scale)
url = ZIP_URL.format(year=self.year, scale=scale)
r = requests.get(url, verify=True)
r.raise_for_status()
try:
with ZipFile(BytesIO(r.content)) as zipfile:
with zipfile.open(filename) as f:
shapes = geojson.load(f)
# For some reason this year/scale isn't available
except KeyError:
logging.warning(
f"No match for this year ({self.year}) and scale ({self.scale})"
)
# Remove this year from the sample and try another year
self.years.remove(self.year)
self.year = self.year_selector(self.years)
logging.warning(f"Retrying with year ({self.year})")
return self._get_shapes()
return shapes
def find(self, lat, lon):
"""Find every NUTS region for this lat, lon"""
p = geometry.Point(lon, lat)
nuts = []
for region in self.shapes["features"]:
s = geometry.shape(region["geometry"])
if s.contains(p):
nuts.append(region["properties"])
return sorted(nuts, key=lambda row: row["LEVL_CODE"])
|
def __init__(self, year=None, scale=None):
"""
Args:
year (int): If provided, NUTS regions for this year will be used (if available)
scale (int): If provided, NUTS regions at this resolution will be used (if available)
"""
self.years = list(_get_available(YEAR_REGEX))
self.year_selector = max
_setattr(self, year, "year", YEAR_REGEX, self.year_selector)
_setattr(self, scale, "scale", SCALE_REGEX, _middle) # Take the middle scale
self.shapes = self._get_shapes()
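A usage sketch of NutsFinder. Running it downloads real GISCO shape files over HTTP; the `NUTS_ID` and `NUTS_NAME` property keys come from the GISCO GeoJSON and are assumptions here, which is why `.get()` is used.
# Usage sketch: fetches real GISCO data when executed.
nf = NutsFinder()                                # latest year, middle scale
for region in nf.find(lat=53.406, lon=-2.966):   # a point near Liverpool
    # each entry is the GeoJSON properties dict of a containing NUTS region
    print(region['LEVL_CODE'], region.get('NUTS_ID'), region.get('NUTS_NAME'))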
| 65 | 75 |
"""
nuts_finder
-----------
You give it a point, it tells you all the EU NUTS regions
"""
import geojson
import requests
import re
from io import BytesIO
from zipfile import ZipFile
from shapely import geometry
from functools import lru_cache
import logging
YEAR_REGEX = "NUTS ([0-9]+)"
SCALE_REGEX = "1:([0-9]+) Million"
TOP_URL = "https://ec.europa.eu/eurostat/cache/" "GISCO/distribution/v2/nuts/download"
ZIP_URL = f"{TOP_URL}/" "ref-nuts-{year}-{scale}m.geojson.zip"
NESTED_FILE = "NUTS_RG_{scale}M_{year}_4326.geojson"
def _middle(values):
"""Lower bound of median, without using numpy (heavy reqs)"""
n = len(values)
is_odd = n % 2
middle_idx = int((n + is_odd) / 2) - 1
return sorted(values)[middle_idx]
def _setattr(obj, value, value_name, regex, selector):
"""Either apply setattr on `obj` with value `value`, if `value` is not None, otherwise
select a `value` from the available range of allowed values, selected by a custom `selector`
function.
Args:
obj: An object on which to run setattr
value: A value which if not None will be set as an attribute of object
value_name (str): The name of the new attribute
regex (str): regex string by which to find allowed values on the NUTS website.
selector (function): Function which takes an iterable and selects a value.
"""
allowed_values = _get_available(regex)
if value is None:
value = selector(allowed_values)
if value not in allowed_values:
raise ValueError(f"'{value_name}' must be one of {allowed_values}")
setattr(obj, value_name, value)
@lru_cache()
def _get_available(regex):
"""Use the provided regex to find allowed values on the NUTS website."""
r = requests.get(TOP_URL, verify=True)
values = set(int(yr) for yr in re.findall(regex, r.text))
return values
class NutsFinder:
"""
Object for holding onto NUTS data and exposing it to the user, also
providing a lat, lon lookup
"""
def __init__(self, year=None, scale=None):
"""
Args:
year (int): If provided, NUTS regions for this year will be used (if available)
scale (int): If provided, NUTS regions at this resolution will be used (if available)
"""
self.years = list(_get_available(YEAR_REGEX))
self.year_selector = max
_setattr(self, year, "year", YEAR_REGEX, self.year_selector)
_setattr(self, scale, "scale", SCALE_REGEX, _middle) # Take the middle scale
self.shapes = self._get_shapes()
def _get_shapes(self):
"""Load the shape files for the given year and scale"""
scale = str(self.scale).zfill(2)
filename = NESTED_FILE.format(year=self.year, scale=scale)
url = ZIP_URL.format(year=self.year, scale=scale)
r = requests.get(url, verify=True)
r.raise_for_status()
try:
with ZipFile(BytesIO(r.content)) as zipfile:
with zipfile.open(filename) as f:
shapes = geojson.load(f)
# For some reason this year/scale isn't available
except KeyError:
logging.warning(
f"No match for this year ({self.year}) and scale ({self.scale})"
)
# Remove this year from the sample and try another year
self.years.remove(self.year)
self.year = self.year_selector(self.years)
logging.warning(f"Retrying with year ({self.year})")
return self._get_shapes()
return shapes
def find(self, lat, lon):
"""Find every NUTS region for this lat, lon"""
p = geometry.Point(lon, lat)
nuts = []
for region in self.shapes["features"]:
s = geometry.shape(region["geometry"])
if s.contains(p):
nuts.append(region["properties"])
return sorted(nuts, key=lambda row: row["LEVL_CODE"])
|
bleu_score
|
Approximate BLEU score computation between labels and predictions.
An approximate BLEU scoring method since we do not glue word pieces or
decode the ids and tokenize the output. By default, we use ngram order of 4
and use brevity penalty. Also, this does not have beam search.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
labels: Tensor of size [batch_size, length_labels]
Returns:
bleu: float32, approx bleu score
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for calculating loss, accuracy, and other model metrics.
Metrics:
- Padded loss, accuracy, and negative log perplexity. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/metrics.py
- BLEU approximation. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/bleu_hook.py
- ROUGE score. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/rouge.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow.compat.v1 as tf
def _pad_tensors_to_same_length(x, y):
"""Pad x and y so that the results have the same length (second dimension)."""
with tf.name_scope("pad_to_same_length"):
x_length = tf.shape(x)[1]
y_length = tf.shape(y)[1]
max_length = tf.maximum(x_length, y_length)
x = tf.pad(x, [[0, 0], [0, max_length - x_length], [0, 0]])
y = tf.pad(y, [[0, 0], [0, max_length - y_length]])
return x, y
def padded_cross_entropy_loss(logits, labels, smoothing, vocab_size):
"""Calculate cross entropy loss while ignoring padding.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
labels: Tensor of size [batch_size, length_labels]
smoothing: Label smoothing constant, used to determine the on and off values
vocab_size: int size of the vocabulary
Returns:
Returns the cross entropy loss and weight tensors: float32 tensors with
shape [batch_size, max(length_logits, length_labels)]
"""
with tf.name_scope("loss", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
# Calculate smoothing cross entropy
with tf.name_scope("smoothing_cross_entropy", values=[logits, labels]):
confidence = 1.0 - smoothing
low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)
soft_targets = tf.one_hot(
tf.cast(labels, tf.int32),
depth=vocab_size,
on_value=confidence,
off_value=low_confidence)
xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(
logits=logits, labels=soft_targets)
# Calculate the best (lowest) possible value of cross entropy, and
# subtract from the cross entropy loss.
normalizing_constant = -(
confidence * tf.log(confidence) + tf.to_float(vocab_size - 1) *
low_confidence * tf.log(low_confidence + 1e-20))
xentropy -= normalizing_constant
weights = tf.to_float(tf.not_equal(labels, 0))
return xentropy * weights, weights
def _convert_to_eval_metric(metric_fn):
"""Wrap a metric fn that returns scores and weights as an eval metric fn.
The input metric_fn returns values for the current batch. The wrapper
aggregates the return values collected over all of the batches evaluated.
Args:
metric_fn: function that returns scores and weights for the current batch's
logits and predicted labels.
Returns:
function that aggregates the scores and weights from metric_fn.
"""
def problem_metric_fn(*args):
"""Returns an aggregation of the metric_fn's returned values."""
(scores, weights) = metric_fn(*args)
# The tf.metrics.mean function assures correct aggregation.
return tf.metrics.mean(scores, weights)
return problem_metric_fn
def get_eval_metrics(logits, labels, params):
"""Return dictionary of model evaluation metrics."""
metrics = {
"accuracy": _convert_to_eval_metric(padded_accuracy)(logits, labels),
"accuracy_top5": _convert_to_eval_metric(padded_accuracy_top5)(
logits, labels),
"accuracy_per_sequence": _convert_to_eval_metric(
padded_sequence_accuracy)(logits, labels),
"neg_log_perplexity": _convert_to_eval_metric(padded_neg_log_perplexity)(
logits, labels, params["vocab_size"]),
}
if not params["use_tpu"]:
# TPU does not support tf.py_func
metrics.update({
"approx_bleu_score": _convert_to_eval_metric(
bleu_score)(logits, labels),
"rouge_2_fscore": _convert_to_eval_metric(
rouge_2_fscore)(logits, labels),
"rouge_L_fscore": _convert_to_eval_metric(
rouge_l_fscore)(logits, labels),
})
# Prefix each of the metric names with "metrics/". This allows the metric
# graphs to display under the "metrics" category in TensorBoard.
metrics = {"metrics/%s" % k: v for k, v in six.iteritems(metrics)}
return metrics
def padded_accuracy(logits, labels):
"""Percentage of times that predictions matches labels on non-0s."""
with tf.variable_scope("padded_accuracy", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
outputs = tf.to_int32(tf.argmax(logits, axis=-1))
padded_labels = tf.to_int32(labels)
return tf.to_float(tf.equal(outputs, padded_labels)), weights
def padded_accuracy_topk(logits, labels, k):
"""Percentage of times that top-k predictions matches labels on non-0s."""
with tf.variable_scope("padded_accuracy_topk", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
effective_k = tf.minimum(k, tf.shape(logits)[-1])
_, outputs = tf.nn.top_k(logits, k=effective_k)
outputs = tf.to_int32(outputs)
padded_labels = tf.to_int32(labels)
padded_labels = tf.expand_dims(padded_labels, axis=-1)
padded_labels += tf.zeros_like(outputs) # Pad to same shape.
same = tf.to_float(tf.equal(outputs, padded_labels))
same_topk = tf.reduce_sum(same, axis=-1)
return same_topk, weights
def padded_accuracy_top5(logits, labels):
return padded_accuracy_topk(logits, labels, 5)
def padded_sequence_accuracy(logits, labels):
"""Percentage of times that predictions matches labels everywhere (non-0)."""
with tf.variable_scope("padded_sequence_accuracy", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
outputs = tf.to_int32(tf.argmax(logits, axis=-1))
padded_labels = tf.to_int32(labels)
not_correct = tf.to_float(tf.not_equal(outputs, padded_labels)) * weights
axis = list(range(1, len(outputs.get_shape())))
correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis))
return correct_seq, tf.constant(1.0)
def padded_neg_log_perplexity(logits, labels, vocab_size):
"""Average log-perplexity excluding padding 0s. No smoothing."""
num, den = padded_cross_entropy_loss(logits, labels, 0, vocab_size)
return -num, den
# MASKED: bleu_score function (lines 190-207)
def _get_ngrams_with_counter(segment, max_order):
"""Extracts all n-grams up to a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
max_order: maximum length in tokens of the n-grams returned by this
method.
Returns:
The Counter containing all n-grams up to max_order in segment
with a count of how many times each n-gram occurred.
"""
ngram_counts = collections.Counter()
for order in xrange(1, max_order + 1):
for i in xrange(0, len(segment) - order + 1):
ngram = tuple(segment[i:i + order])
ngram_counts[ngram] += 1
return ngram_counts
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
use_bp=True):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
use_bp: boolean, whether to apply brevity penalty.
Returns:
BLEU score.
"""
reference_length = 0
translation_length = 0
bp = 1.0
geo_mean = 0
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
precisions = []
for (references, translations) in zip(reference_corpus, translation_corpus):
reference_length += len(references)
translation_length += len(translations)
ref_ngram_counts = _get_ngrams_with_counter(references, max_order)
translation_ngram_counts = _get_ngrams_with_counter(translations, max_order)
overlap = dict((ngram,
min(count, translation_ngram_counts[ngram]))
for ngram, count in ref_ngram_counts.items())
for ngram in overlap:
matches_by_order[len(ngram) - 1] += overlap[ngram]
for ngram in translation_ngram_counts:
possible_matches_by_order[len(ngram) - 1] += translation_ngram_counts[
ngram]
precisions = [0] * max_order
smooth = 1.0
for i in xrange(0, max_order):
if possible_matches_by_order[i] > 0:
precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[i]
if matches_by_order[i] > 0:
precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[
i]
else:
smooth *= 2
precisions[i] = 1.0 / (smooth * possible_matches_by_order[i])
else:
precisions[i] = 0.0
if max(precisions) > 0:
p_log_sum = sum(math.log(p) for p in precisions if p)
geo_mean = math.exp(p_log_sum / max_order)
if use_bp:
ratio = translation_length / reference_length
bp = math.exp(1 - 1. / ratio) if ratio < 1.0 else 1.0
bleu = geo_mean * bp
return np.float32(bleu)
def rouge_2_fscore(logits, labels):
"""ROUGE-2 F1 score computation between labels and predictions.
This is an approximate ROUGE scoring method since we do not glue word pieces
or decode the ids and tokenize the output.
Args:
logits: tensor, model predictions
labels: tensor, gold output.
Returns:
rouge2_fscore: approx rouge-2 f1 score.
"""
predictions = tf.to_int32(tf.argmax(logits, axis=-1))
# TODO: Look into removing use of py_func
rouge_2_f_score = tf.py_func(rouge_n, (predictions, labels), tf.float32)
return rouge_2_f_score, tf.constant(1.0)
def _get_ngrams(n, text):
"""Calculates n-grams.
Args:
n: which n-grams to calculate
text: An array of tokens
Returns:
A set of n-grams
"""
ngram_set = set()
text_length = len(text)
max_index_ngram_start = text_length - n
for i in range(max_index_ngram_start + 1):
ngram_set.add(tuple(text[i:i + n]))
return ngram_set
def rouge_n(eval_sentences, ref_sentences, n=2):
"""Computes ROUGE-N f1 score of two text collections of sentences.
Source: https://www.microsoft.com/en-us/research/publication/
rouge-a-package-for-automatic-evaluation-of-summaries/
Args:
eval_sentences: Predicted sentences.
ref_sentences: Sentences from the reference set
n: Size of ngram. Defaults to 2.
Returns:
f1 score for ROUGE-N
"""
f1_scores = []
for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):
eval_ngrams = _get_ngrams(n, eval_sentence)
ref_ngrams = _get_ngrams(n, ref_sentence)
ref_count = len(ref_ngrams)
eval_count = len(eval_ngrams)
# Count the overlapping ngrams between evaluated and reference
overlapping_ngrams = eval_ngrams.intersection(ref_ngrams)
overlapping_count = len(overlapping_ngrams)
# Handle edge case. This isn't mathematically correct, but it's good enough
if eval_count == 0:
precision = 0.0
else:
precision = float(overlapping_count) / eval_count
if ref_count == 0:
recall = 0.0
else:
recall = float(overlapping_count) / ref_count
f1_scores.append(2.0 * ((precision * recall) / (precision + recall + 1e-8)))
# return overlapping_count / reference_count
return np.mean(f1_scores, dtype=np.float32)
def rouge_l_fscore(predictions, labels):
"""ROUGE scores computation between labels and predictions.
This is an approximate ROUGE scoring method since we do not glue word pieces
or decode the ids and tokenize the output.
Args:
predictions: tensor, model predictions
labels: tensor, gold output.
Returns:
rouge_l_fscore: approx rouge-l f1 score.
"""
outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
rouge_l_f_score = tf.py_func(rouge_l_sentence_level, (outputs, labels),
tf.float32)
return rouge_l_f_score, tf.constant(1.0)
def rouge_l_sentence_level(eval_sentences, ref_sentences):
"""Computes ROUGE-L (sentence level) of two collections of sentences.
Source: https://www.microsoft.com/en-us/research/publication/
rouge-a-package-for-automatic-evaluation-of-summaries/
Calculated according to:
R_lcs = LCS(X,Y)/m
P_lcs = LCS(X,Y)/n
F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)
where:
X = reference summary
Y = Candidate summary
m = length of reference summary
n = length of candidate summary
Args:
eval_sentences: The sentences that have been picked by the summarizer
ref_sentences: The sentences from the reference set
Returns:
A float: F_lcs
"""
f1_scores = []
for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):
m = float(len(ref_sentence))
n = float(len(eval_sentence))
lcs = _len_lcs(eval_sentence, ref_sentence)
f1_scores.append(_f_lcs(lcs, m, n))
return np.mean(f1_scores, dtype=np.float32)
def _len_lcs(x, y):
"""Returns the length of the Longest Common Subsequence between two seqs.
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: sequence of words
y: sequence of words
Returns:
integer: Length of LCS between x and y
"""
table = _lcs(x, y)
n, m = len(x), len(y)
return table[n, m]
def _lcs(x, y):
"""Computes the length of the LCS between two seqs.
The implementation below uses a dynamic programming algorithm and runs
in O(nm) time where n = len(x) and m = len(y).
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: collection of words
y: collection of words
Returns:
Table of dictionary of coord and len lcs
"""
n, m = len(x), len(y)
table = dict()
for i in range(n + 1):
for j in range(m + 1):
if i == 0 or j == 0:
table[i, j] = 0
elif x[i - 1] == y[j - 1]:
table[i, j] = table[i - 1, j - 1] + 1
else:
table[i, j] = max(table[i - 1, j], table[i, j - 1])
return table
def _f_lcs(llcs, m, n):
"""Computes the LCS-based F-measure score.
Source: http://research.microsoft.com/en-us/um/people/cyl/download/papers/
rouge-working-note-v1.3.1.pdf
Args:
llcs: Length of LCS
m: number of words in reference summary
n: number of words in candidate summary
Returns:
Float. LCS-based F-measure score
"""
r_lcs = llcs / m
p_lcs = llcs / n
beta = p_lcs / (r_lcs + 1e-12)
num = (1 + (beta ** 2)) * r_lcs * p_lcs
denom = r_lcs + ((beta ** 2) * p_lcs)
f_lcs = num / (denom + 1e-12)
return f_lcs
|
def bleu_score(logits, labels):
"""Approximate BLEU score computation between labels and predictions.
An approximate BLEU scoring method since we do not glue word pieces or
decode the ids and tokenize the output. By default, we use ngram order of 4
and use brevity penalty. Also, this does not have beam search.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
labels: Tensor of size [batch_size, length_labels]
Returns:
bleu: float32, approx bleu score
"""
predictions = tf.to_int32(tf.argmax(logits, axis=-1))
# TODO: Look into removing use of py_func
bleu = tf.py_func(compute_bleu, (labels, predictions), tf.float32)
return bleu, tf.constant(1.0)
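Because bleu_score only wraps compute_bleu (defined earlier in this module) with tf.py_func, the scoring logic can be sanity-checked on plain Python token-id lists without a TensorFlow session; the figures in the comments are illustrative.
# Toy check of the underlying compute_bleu helper on token-id sequences.
reference = [[1, 2, 3, 4, 5, 6]]
translation = [[1, 2, 3, 4, 0, 6]]
print(compute_bleu(reference, reference))    # 1.0 for an identical corpus
print(compute_bleu(reference, translation))  # ~0.54: one mismatch breaks several n-grams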
| 190 | 207 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for calculating loss, accuracy, and other model metrics.
Metrics:
- Padded loss, accuracy, and negative log perplexity. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/metrics.py
- BLEU approximation. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/bleu_hook.py
- ROUGE score. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/rouge.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow.compat.v1 as tf
def _pad_tensors_to_same_length(x, y):
"""Pad x and y so that the results have the same length (second dimension)."""
with tf.name_scope("pad_to_same_length"):
x_length = tf.shape(x)[1]
y_length = tf.shape(y)[1]
max_length = tf.maximum(x_length, y_length)
x = tf.pad(x, [[0, 0], [0, max_length - x_length], [0, 0]])
y = tf.pad(y, [[0, 0], [0, max_length - y_length]])
return x, y
def padded_cross_entropy_loss(logits, labels, smoothing, vocab_size):
"""Calculate cross entropy loss while ignoring padding.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
labels: Tensor of size [batch_size, length_labels]
smoothing: Label smoothing constant, used to determine the on and off values
vocab_size: int size of the vocabulary
Returns:
Returns the cross entropy loss and weight tensors: float32 tensors with
shape [batch_size, max(length_logits, length_labels)]
"""
with tf.name_scope("loss", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
# Calculate smoothing cross entropy
with tf.name_scope("smoothing_cross_entropy", values=[logits, labels]):
confidence = 1.0 - smoothing
low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)
soft_targets = tf.one_hot(
tf.cast(labels, tf.int32),
depth=vocab_size,
on_value=confidence,
off_value=low_confidence)
xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(
logits=logits, labels=soft_targets)
# Calculate the best (lowest) possible value of cross entropy, and
# subtract from the cross entropy loss.
normalizing_constant = -(
confidence * tf.log(confidence) + tf.to_float(vocab_size - 1) *
low_confidence * tf.log(low_confidence + 1e-20))
xentropy -= normalizing_constant
weights = tf.to_float(tf.not_equal(labels, 0))
return xentropy * weights, weights
def _convert_to_eval_metric(metric_fn):
"""Wrap a metric fn that returns scores and weights as an eval metric fn.
The input metric_fn returns values for the current batch. The wrapper
aggregates the return values collected over all of the batches evaluated.
Args:
metric_fn: function that returns scores and weights for the current batch's
logits and predicted labels.
Returns:
function that aggregates the scores and weights from metric_fn.
"""
def problem_metric_fn(*args):
"""Returns an aggregation of the metric_fn's returned values."""
(scores, weights) = metric_fn(*args)
# The tf.metrics.mean function assures correct aggregation.
return tf.metrics.mean(scores, weights)
return problem_metric_fn
def get_eval_metrics(logits, labels, params):
"""Return dictionary of model evaluation metrics."""
metrics = {
"accuracy": _convert_to_eval_metric(padded_accuracy)(logits, labels),
"accuracy_top5": _convert_to_eval_metric(padded_accuracy_top5)(
logits, labels),
"accuracy_per_sequence": _convert_to_eval_metric(
padded_sequence_accuracy)(logits, labels),
"neg_log_perplexity": _convert_to_eval_metric(padded_neg_log_perplexity)(
logits, labels, params["vocab_size"]),
}
if not params["use_tpu"]:
# TPU does not support tf.py_func
metrics.update({
"approx_bleu_score": _convert_to_eval_metric(
bleu_score)(logits, labels),
"rouge_2_fscore": _convert_to_eval_metric(
rouge_2_fscore)(logits, labels),
"rouge_L_fscore": _convert_to_eval_metric(
rouge_l_fscore)(logits, labels),
})
# Prefix each of the metric names with "metrics/". This allows the metric
# graphs to display under the "metrics" category in TensorBoard.
metrics = {"metrics/%s" % k: v for k, v in six.iteritems(metrics)}
return metrics
def padded_accuracy(logits, labels):
"""Percentage of times that predictions matches labels on non-0s."""
with tf.variable_scope("padded_accuracy", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
outputs = tf.to_int32(tf.argmax(logits, axis=-1))
padded_labels = tf.to_int32(labels)
return tf.to_float(tf.equal(outputs, padded_labels)), weights
def padded_accuracy_topk(logits, labels, k):
"""Percentage of times that top-k predictions matches labels on non-0s."""
with tf.variable_scope("padded_accuracy_topk", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
effective_k = tf.minimum(k, tf.shape(logits)[-1])
_, outputs = tf.nn.top_k(logits, k=effective_k)
outputs = tf.to_int32(outputs)
padded_labels = tf.to_int32(labels)
padded_labels = tf.expand_dims(padded_labels, axis=-1)
padded_labels += tf.zeros_like(outputs) # Pad to same shape.
same = tf.to_float(tf.equal(outputs, padded_labels))
same_topk = tf.reduce_sum(same, axis=-1)
return same_topk, weights
def padded_accuracy_top5(logits, labels):
return padded_accuracy_topk(logits, labels, 5)
def padded_sequence_accuracy(logits, labels):
"""Percentage of times that predictions matches labels everywhere (non-0)."""
with tf.variable_scope("padded_sequence_accuracy", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
outputs = tf.to_int32(tf.argmax(logits, axis=-1))
padded_labels = tf.to_int32(labels)
not_correct = tf.to_float(tf.not_equal(outputs, padded_labels)) * weights
axis = list(range(1, len(outputs.get_shape())))
correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis))
return correct_seq, tf.constant(1.0)
def padded_neg_log_perplexity(logits, labels, vocab_size):
"""Average log-perplexity excluding padding 0s. No smoothing."""
num, den = padded_cross_entropy_loss(logits, labels, 0, vocab_size)
return -num, den
def bleu_score(logits, labels):
"""Approximate BLEU score computation between labels and predictions.
An approximate BLEU scoring method since we do not glue word pieces or
decode the ids and tokenize the output. By default, we use ngram order of 4
and use brevity penalty. Also, this does not have beam search.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
labels: Tensor of size [batch_size, length_labels]
Returns:
bleu: float32, approx bleu score
"""
predictions = tf.to_int32(tf.argmax(logits, axis=-1))
# TODO: Look into removing use of py_func
bleu = tf.py_func(compute_bleu, (labels, predictions), tf.float32)
return bleu, tf.constant(1.0)
def _get_ngrams_with_counter(segment, max_order):
"""Extracts all n-grams up to a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
max_order: maximum length in tokens of the n-grams returned by this
method.
Returns:
The Counter containing all n-grams up to max_order in segment
with a count of how many times each n-gram occurred.
"""
ngram_counts = collections.Counter()
for order in xrange(1, max_order + 1):
for i in xrange(0, len(segment) - order + 1):
ngram = tuple(segment[i:i + order])
ngram_counts[ngram] += 1
return ngram_counts
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
use_bp=True):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
use_bp: boolean, whether to apply brevity penalty.
Returns:
BLEU score.
"""
reference_length = 0
translation_length = 0
bp = 1.0
geo_mean = 0
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
precisions = []
for (references, translations) in zip(reference_corpus, translation_corpus):
reference_length += len(references)
translation_length += len(translations)
ref_ngram_counts = _get_ngrams_with_counter(references, max_order)
translation_ngram_counts = _get_ngrams_with_counter(translations, max_order)
overlap = dict((ngram,
min(count, translation_ngram_counts[ngram]))
for ngram, count in ref_ngram_counts.items())
for ngram in overlap:
matches_by_order[len(ngram) - 1] += overlap[ngram]
for ngram in translation_ngram_counts:
possible_matches_by_order[len(ngram) - 1] += translation_ngram_counts[
ngram]
precisions = [0] * max_order
smooth = 1.0
for i in xrange(0, max_order):
if possible_matches_by_order[i] > 0:
precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[i]
if matches_by_order[i] > 0:
precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[
i]
else:
smooth *= 2
precisions[i] = 1.0 / (smooth * possible_matches_by_order[i])
else:
precisions[i] = 0.0
if max(precisions) > 0:
p_log_sum = sum(math.log(p) for p in precisions if p)
geo_mean = math.exp(p_log_sum / max_order)
if use_bp:
ratio = translation_length / reference_length
bp = math.exp(1 - 1. / ratio) if ratio < 1.0 else 1.0
bleu = geo_mean * bp
return np.float32(bleu)
def rouge_2_fscore(logits, labels):
"""ROUGE-2 F1 score computation between labels and predictions.
This is an approximate ROUGE scoring method since we do not glue word pieces
or decode the ids and tokenize the output.
Args:
logits: tensor, model predictions
labels: tensor, gold output.
Returns:
rouge2_fscore: approx rouge-2 f1 score.
"""
predictions = tf.to_int32(tf.argmax(logits, axis=-1))
# TODO: Look into removing use of py_func
rouge_2_f_score = tf.py_func(rouge_n, (predictions, labels), tf.float32)
return rouge_2_f_score, tf.constant(1.0)
def _get_ngrams(n, text):
"""Calculates n-grams.
Args:
n: which n-grams to calculate
text: An array of tokens
Returns:
A set of n-grams
"""
ngram_set = set()
text_length = len(text)
max_index_ngram_start = text_length - n
for i in range(max_index_ngram_start + 1):
ngram_set.add(tuple(text[i:i + n]))
return ngram_set
def rouge_n(eval_sentences, ref_sentences, n=2):
"""Computes ROUGE-N f1 score of two text collections of sentences.
Source: https://www.microsoft.com/en-us/research/publication/
rouge-a-package-for-automatic-evaluation-of-summaries/
Args:
eval_sentences: Predicted sentences.
ref_sentences: Sentences from the reference set
n: Size of ngram. Defaults to 2.
Returns:
f1 score for ROUGE-N
"""
f1_scores = []
for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):
eval_ngrams = _get_ngrams(n, eval_sentence)
ref_ngrams = _get_ngrams(n, ref_sentence)
ref_count = len(ref_ngrams)
eval_count = len(eval_ngrams)
# Count the overlapping ngrams between evaluated and reference
overlapping_ngrams = eval_ngrams.intersection(ref_ngrams)
overlapping_count = len(overlapping_ngrams)
# Handle edge case. This isn't mathematically correct, but it's good enough
if eval_count == 0:
precision = 0.0
else:
precision = float(overlapping_count) / eval_count
if ref_count == 0:
recall = 0.0
else:
recall = float(overlapping_count) / ref_count
f1_scores.append(2.0 * ((precision * recall) / (precision + recall + 1e-8)))
# return overlapping_count / reference_count
return np.mean(f1_scores, dtype=np.float32)
def rouge_l_fscore(predictions, labels):
"""ROUGE scores computation between labels and predictions.
This is an approximate ROUGE scoring method since we do not glue word pieces
or decode the ids and tokenize the output.
Args:
predictions: tensor, model predictions
labels: tensor, gold output.
Returns:
rouge_l_fscore: approx rouge-l f1 score.
"""
outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
rouge_l_f_score = tf.py_func(rouge_l_sentence_level, (outputs, labels),
tf.float32)
return rouge_l_f_score, tf.constant(1.0)
def rouge_l_sentence_level(eval_sentences, ref_sentences):
"""Computes ROUGE-L (sentence level) of two collections of sentences.
Source: https://www.microsoft.com/en-us/research/publication/
rouge-a-package-for-automatic-evaluation-of-summaries/
Calculated according to:
R_lcs = LCS(X,Y)/m
P_lcs = LCS(X,Y)/n
F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)
where:
X = reference summary
Y = Candidate summary
m = length of reference summary
n = length of candidate summary
Args:
eval_sentences: The sentences that have been picked by the summarizer
ref_sentences: The sentences from the reference set
Returns:
A float: F_lcs
"""
f1_scores = []
for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):
m = float(len(ref_sentence))
n = float(len(eval_sentence))
lcs = _len_lcs(eval_sentence, ref_sentence)
f1_scores.append(_f_lcs(lcs, m, n))
return np.mean(f1_scores, dtype=np.float32)
def _len_lcs(x, y):
"""Returns the length of the Longest Common Subsequence between two seqs.
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: sequence of words
y: sequence of words
Returns:
integer: Length of LCS between x and y
"""
table = _lcs(x, y)
n, m = len(x), len(y)
return table[n, m]
def _lcs(x, y):
"""Computes the length of the LCS between two seqs.
The implementation below uses a dynamic programming algorithm and runs
in O(nm) time where n = len(x) and m = len(y).
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: collection of words
y: collection of words
Returns:
Table of dictionary of coord and len lcs
"""
n, m = len(x), len(y)
table = dict()
for i in range(n + 1):
for j in range(m + 1):
if i == 0 or j == 0:
table[i, j] = 0
elif x[i - 1] == y[j - 1]:
table[i, j] = table[i - 1, j - 1] + 1
else:
table[i, j] = max(table[i - 1, j], table[i, j - 1])
return table
def _f_lcs(llcs, m, n):
"""Computes the LCS-based F-measure score.
Source: http://research.microsoft.com/en-us/um/people/cyl/download/papers/
rouge-working-note-v1.3.1.pdf
Args:
llcs: Length of LCS
m: number of words in reference summary
n: number of words in candidate summary
Returns:
Float. LCS-based F-measure score
"""
r_lcs = llcs / m
p_lcs = llcs / n
beta = p_lcs / (r_lcs + 1e-12)
num = (1 + (beta ** 2)) * r_lcs * p_lcs
denom = r_lcs + ((beta ** 2) * p_lcs)
f_lcs = num / (denom + 1e-12)
return f_lcs
|
rouge_2_fscore
|
ROUGE-2 F1 score computation between labels and predictions.
This is an approximate ROUGE scoring method since we do not glue word pieces
or decode the ids and tokenize the output.
Args:
logits: tensor, model predictions
labels: tensor, gold output.
Returns:
rouge2_fscore: approx rouge-2 f1 score.
|
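rouge_2_fscore wraps the plain-Python rouge_n helper (defined in the module below) with tf.py_func, so the underlying score can be sketched directly on token-id sequences; the number in the comment is illustrative.
# Toy check of the underlying rouge_n helper (n=2, i.e. ROUGE-2).
ref = [[1, 2, 3, 4, 5]]
hyp = [[1, 2, 3, 9, 5]]
print(rouge_n(hyp, ref, n=2))  # ~0.5: two of four bigrams overlap in each direction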
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for calculating loss, accuracy, and other model metrics.
Metrics:
- Padded loss, accuracy, and negative log perplexity. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/metrics.py
- BLEU approximation. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/bleu_hook.py
- ROUGE score. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/rouge.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow.compat.v1 as tf
def _pad_tensors_to_same_length(x, y):
"""Pad x and y so that the results have the same length (second dimension)."""
with tf.name_scope("pad_to_same_length"):
x_length = tf.shape(x)[1]
y_length = tf.shape(y)[1]
max_length = tf.maximum(x_length, y_length)
x = tf.pad(x, [[0, 0], [0, max_length - x_length], [0, 0]])
y = tf.pad(y, [[0, 0], [0, max_length - y_length]])
return x, y
def padded_cross_entropy_loss(logits, labels, smoothing, vocab_size):
"""Calculate cross entropy loss while ignoring padding.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
labels: Tensor of size [batch_size, length_labels]
smoothing: Label smoothing constant, used to determine the on and off values
vocab_size: int size of the vocabulary
Returns:
Returns the cross entropy loss and weight tensors: float32 tensors with
shape [batch_size, max(length_logits, length_labels)]
"""
with tf.name_scope("loss", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
# Calculate smoothing cross entropy
with tf.name_scope("smoothing_cross_entropy", values=[logits, labels]):
confidence = 1.0 - smoothing
low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)
soft_targets = tf.one_hot(
tf.cast(labels, tf.int32),
depth=vocab_size,
on_value=confidence,
off_value=low_confidence)
xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(
logits=logits, labels=soft_targets)
# Calculate the best (lowest) possible value of cross entropy, and
# subtract from the cross entropy loss.
normalizing_constant = -(
confidence * tf.log(confidence) + tf.to_float(vocab_size - 1) *
low_confidence * tf.log(low_confidence + 1e-20))
xentropy -= normalizing_constant
weights = tf.to_float(tf.not_equal(labels, 0))
return xentropy * weights, weights
def _convert_to_eval_metric(metric_fn):
"""Wrap a metric fn that returns scores and weights as an eval metric fn.
The input metric_fn returns values for the current batch. The wrapper
aggregates the return values collected over all of the batches evaluated.
Args:
metric_fn: function that returns scores and weights for the current batch's
logits and predicted labels.
Returns:
function that aggregates the scores and weights from metric_fn.
"""
def problem_metric_fn(*args):
"""Returns an aggregation of the metric_fn's returned values."""
(scores, weights) = metric_fn(*args)
# The tf.metrics.mean function assures correct aggregation.
return tf.metrics.mean(scores, weights)
return problem_metric_fn
def get_eval_metrics(logits, labels, params):
"""Return dictionary of model evaluation metrics."""
metrics = {
"accuracy": _convert_to_eval_metric(padded_accuracy)(logits, labels),
"accuracy_top5": _convert_to_eval_metric(padded_accuracy_top5)(
logits, labels),
"accuracy_per_sequence": _convert_to_eval_metric(
padded_sequence_accuracy)(logits, labels),
"neg_log_perplexity": _convert_to_eval_metric(padded_neg_log_perplexity)(
logits, labels, params["vocab_size"]),
}
if not params["use_tpu"]:
# TPU does not support tf.py_func
metrics.update({
"approx_bleu_score": _convert_to_eval_metric(
bleu_score)(logits, labels),
"rouge_2_fscore": _convert_to_eval_metric(
rouge_2_fscore)(logits, labels),
"rouge_L_fscore": _convert_to_eval_metric(
rouge_l_fscore)(logits, labels),
})
# Prefix each of the metric names with "metrics/". This allows the metric
# graphs to display under the "metrics" category in TensorBoard.
metrics = {"metrics/%s" % k: v for k, v in six.iteritems(metrics)}
return metrics
def padded_accuracy(logits, labels):
"""Percentage of times that predictions matches labels on non-0s."""
with tf.variable_scope("padded_accuracy", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
outputs = tf.to_int32(tf.argmax(logits, axis=-1))
padded_labels = tf.to_int32(labels)
return tf.to_float(tf.equal(outputs, padded_labels)), weights
def padded_accuracy_topk(logits, labels, k):
"""Percentage of times that top-k predictions matches labels on non-0s."""
with tf.variable_scope("padded_accuracy_topk", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
effective_k = tf.minimum(k, tf.shape(logits)[-1])
_, outputs = tf.nn.top_k(logits, k=effective_k)
outputs = tf.to_int32(outputs)
padded_labels = tf.to_int32(labels)
padded_labels = tf.expand_dims(padded_labels, axis=-1)
padded_labels += tf.zeros_like(outputs) # Pad to same shape.
same = tf.to_float(tf.equal(outputs, padded_labels))
same_topk = tf.reduce_sum(same, axis=-1)
return same_topk, weights
def padded_accuracy_top5(logits, labels):
return padded_accuracy_topk(logits, labels, 5)
def padded_sequence_accuracy(logits, labels):
"""Percentage of times that predictions matches labels everywhere (non-0)."""
with tf.variable_scope("padded_sequence_accuracy", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
outputs = tf.to_int32(tf.argmax(logits, axis=-1))
padded_labels = tf.to_int32(labels)
not_correct = tf.to_float(tf.not_equal(outputs, padded_labels)) * weights
axis = list(range(1, len(outputs.get_shape())))
correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis))
return correct_seq, tf.constant(1.0)
def padded_neg_log_perplexity(logits, labels, vocab_size):
"""Average log-perplexity excluding padding 0s. No smoothing."""
num, den = padded_cross_entropy_loss(logits, labels, 0, vocab_size)
return -num, den
def bleu_score(logits, labels):
"""Approximate BLEU score computation between labels and predictions.
An approximate BLEU scoring method since we do not glue word pieces or
decode the ids and tokenize the output. By default, we use ngram order of 4
and use brevity penalty. Also, this does not have beam search.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
labels: Tensor of size [batch_size, length_labels]
Returns:
bleu: float32, approx bleu score
"""
predictions = tf.to_int32(tf.argmax(logits, axis=-1))
# TODO: Look into removing use of py_func
bleu = tf.py_func(compute_bleu, (labels, predictions), tf.float32)
return bleu, tf.constant(1.0)
def _get_ngrams_with_counter(segment, max_order):
"""Extracts all n-grams up to a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
max_order: maximum length in tokens of the n-grams returned by this
method.
Returns:
The Counter containing all n-grams up to max_order in segment
with a count of how many times each n-gram occurred.
"""
ngram_counts = collections.Counter()
for order in xrange(1, max_order + 1):
for i in xrange(0, len(segment) - order + 1):
ngram = tuple(segment[i:i + order])
ngram_counts[ngram] += 1
return ngram_counts
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
use_bp=True):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
use_bp: boolean, whether to apply brevity penalty.
Returns:
BLEU score.
"""
reference_length = 0
translation_length = 0
bp = 1.0
geo_mean = 0
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
precisions = []
for (references, translations) in zip(reference_corpus, translation_corpus):
reference_length += len(references)
translation_length += len(translations)
ref_ngram_counts = _get_ngrams_with_counter(references, max_order)
translation_ngram_counts = _get_ngrams_with_counter(translations, max_order)
overlap = dict((ngram,
min(count, translation_ngram_counts[ngram]))
for ngram, count in ref_ngram_counts.items())
for ngram in overlap:
matches_by_order[len(ngram) - 1] += overlap[ngram]
for ngram in translation_ngram_counts:
possible_matches_by_order[len(ngram) - 1] += translation_ngram_counts[
ngram]
precisions = [0] * max_order
smooth = 1.0
for i in xrange(0, max_order):
if possible_matches_by_order[i] > 0:
precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[i]
if matches_by_order[i] > 0:
precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[
i]
else:
smooth *= 2
precisions[i] = 1.0 / (smooth * possible_matches_by_order[i])
else:
precisions[i] = 0.0
if max(precisions) > 0:
p_log_sum = sum(math.log(p) for p in precisions if p)
geo_mean = math.exp(p_log_sum / max_order)
if use_bp:
ratio = translation_length / reference_length
bp = math.exp(1 - 1. / ratio) if ratio < 1.0 else 1.0
bleu = geo_mean * bp
return np.float32(bleu)
# MASKED: rouge_2_fscore function (lines 296-312)
def _get_ngrams(n, text):
"""Calculates n-grams.
Args:
n: which n-grams to calculate
text: An array of tokens
Returns:
A set of n-grams
"""
ngram_set = set()
text_length = len(text)
max_index_ngram_start = text_length - n
for i in range(max_index_ngram_start + 1):
ngram_set.add(tuple(text[i:i + n]))
return ngram_set
def rouge_n(eval_sentences, ref_sentences, n=2):
"""Computes ROUGE-N f1 score of two text collections of sentences.
Source: https://www.microsoft.com/en-us/research/publication/
rouge-a-package-for-automatic-evaluation-of-summaries/
Args:
eval_sentences: Predicted sentences.
ref_sentences: Sentences from the reference set
n: Size of ngram. Defaults to 2.
Returns:
f1 score for ROUGE-N
"""
f1_scores = []
for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):
eval_ngrams = _get_ngrams(n, eval_sentence)
ref_ngrams = _get_ngrams(n, ref_sentence)
ref_count = len(ref_ngrams)
eval_count = len(eval_ngrams)
# Count the overlapping ngrams between evaluated and reference
overlapping_ngrams = eval_ngrams.intersection(ref_ngrams)
overlapping_count = len(overlapping_ngrams)
# Handle edge case. This isn't mathematically correct, but it's good enough
if eval_count == 0:
precision = 0.0
else:
precision = float(overlapping_count) / eval_count
if ref_count == 0:
recall = 0.0
else:
recall = float(overlapping_count) / ref_count
f1_scores.append(2.0 * ((precision * recall) / (precision + recall + 1e-8)))
# return overlapping_count / reference_count
return np.mean(f1_scores, dtype=np.float32)
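# Illustrative sketch (not part of the original module): rouge_n consumes
# already-tokenized sequences; the toy sentences below are invented.
def _example_rouge_n():
  eval_sentences = [["the", "cat", "sat", "on", "the", "mat"]]
  ref_sentences = [["the", "cat", "is", "on", "the", "mat"]]
  # The shared bigrams are ("the", "cat"), ("on", "the") and ("the", "mat"),
  # so precision and recall are both 3 / 5 and the F1 is roughly 0.6.
  return rouge_n(eval_sentences, ref_sentences, n=2)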
def rouge_l_fscore(predictions, labels):
"""ROUGE scores computation between labels and predictions.
This is an approximate ROUGE scoring method since we do not glue word pieces
or decode the ids and tokenize the output.
Args:
predictions: tensor, model predictions
labels: tensor, gold output.
Returns:
rouge_l_fscore: approx rouge-l f1 score.
"""
outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
rouge_l_f_score = tf.py_func(rouge_l_sentence_level, (outputs, labels),
tf.float32)
return rouge_l_f_score, tf.constant(1.0)
def rouge_l_sentence_level(eval_sentences, ref_sentences):
"""Computes ROUGE-L (sentence level) of two collections of sentences.
Source: https://www.microsoft.com/en-us/research/publication/
rouge-a-package-for-automatic-evaluation-of-summaries/
Calculated according to:
R_lcs = LCS(X,Y)/m
P_lcs = LCS(X,Y)/n
F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)
where:
X = reference summary
Y = Candidate summary
m = length of reference summary
n = length of candidate summary
Args:
eval_sentences: The sentences that have been picked by the summarizer
ref_sentences: The sentences from the reference set
Returns:
A float: F_lcs
"""
f1_scores = []
for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):
m = float(len(ref_sentence))
n = float(len(eval_sentence))
lcs = _len_lcs(eval_sentence, ref_sentence)
f1_scores.append(_f_lcs(lcs, m, n))
return np.mean(f1_scores, dtype=np.float32)
def _len_lcs(x, y):
"""Returns the length of the Longest Common Subsequence between two seqs.
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: sequence of words
y: sequence of words
  Returns:
integer: Length of LCS between x and y
"""
table = _lcs(x, y)
n, m = len(x), len(y)
return table[n, m]
def _lcs(x, y):
"""Computes the length of the LCS between two seqs.
The implementation below uses a DP programming algorithm and runs
in O(nm) time where n = len(x) and m = len(y).
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: collection of words
y: collection of words
Returns:
Table of dictionary of coord and len lcs
"""
n, m = len(x), len(y)
table = dict()
for i in range(n + 1):
for j in range(m + 1):
if i == 0 or j == 0:
table[i, j] = 0
elif x[i - 1] == y[j - 1]:
table[i, j] = table[i - 1, j - 1] + 1
else:
table[i, j] = max(table[i - 1, j], table[i, j - 1])
return table
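# Illustrative sketch (not part of the original module): the DP table built by
# _lcs for two small, made-up sequences, and how _len_lcs reads it.
def _example_lcs_table():
  x = ["a", "b", "c", "d"]
  y = ["b", "d"]
  table = _lcs(x, y)
  # table[(i, j)] holds the LCS length of x[:i] and y[:j]. The full LCS here
  # is ["b", "d"], so table[(4, 2)] == 2 and _len_lcs(x, y) == 2 as well.
  return table[len(x), len(y)]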
def _f_lcs(llcs, m, n):
"""Computes the LCS-based F-measure score.
Source: http://research.microsoft.com/en-us/um/people/cyl/download/papers/
rouge-working-note-v1.3.1.pdf
Args:
llcs: Length of LCS
m: number of words in reference summary
n: number of words in candidate summary
Returns:
Float. LCS-based F-measure score
"""
r_lcs = llcs / m
p_lcs = llcs / n
beta = p_lcs / (r_lcs + 1e-12)
num = (1 + (beta ** 2)) * r_lcs * p_lcs
denom = r_lcs + ((beta ** 2) * p_lcs)
f_lcs = num / (denom + 1e-12)
return f_lcs
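# Illustrative sketch (not part of the original module): the F_lcs arithmetic
# above worked through for an assumed LCS length of 2, a 4-word reference and
# a 3-word candidate.
def _example_f_lcs():
  # r_lcs = 2 / 4 = 0.5, p_lcs = 2 / 3 ~ 0.667, beta = p_lcs / r_lcs ~ 1.333,
  # giving f_lcs = (1 + beta**2) * r_lcs * p_lcs / (r_lcs + beta**2 * p_lcs),
  # which evaluates to roughly 0.55.
  return _f_lcs(llcs=2, m=4.0, n=3.0)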
|
def rouge_2_fscore(logits, labels):
"""ROUGE-2 F1 score computation between labels and predictions.
This is an approximate ROUGE scoring method since we do not glue word pieces
or decode the ids and tokenize the output.
Args:
logits: tensor, model predictions
labels: tensor, gold output.
Returns:
rouge2_fscore: approx rouge-2 f1 score.
"""
predictions = tf.to_int32(tf.argmax(logits, axis=-1))
# TODO: Look into removing use of py_func
rouge_2_f_score = tf.py_func(rouge_n, (predictions, labels), tf.float32)
return rouge_2_f_score, tf.constant(1.0)
| 296 | 312 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for calculating loss, accuracy, and other model metrics.
Metrics:
- Padded loss, accuracy, and negative log perplexity. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/metrics.py
- BLEU approximation. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/bleu_hook.py
- ROUGE score. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/rouge.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow.compat.v1 as tf
def _pad_tensors_to_same_length(x, y):
"""Pad x and y so that the results have the same length (second dimension)."""
with tf.name_scope("pad_to_same_length"):
x_length = tf.shape(x)[1]
y_length = tf.shape(y)[1]
max_length = tf.maximum(x_length, y_length)
x = tf.pad(x, [[0, 0], [0, max_length - x_length], [0, 0]])
y = tf.pad(y, [[0, 0], [0, max_length - y_length]])
return x, y
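# Illustrative sketch (not part of the original file): the shape behaviour of
# _pad_tensors_to_same_length for assumed toy shapes (graph-mode tensors).
def _example_pad_to_same_length():
  logits = tf.zeros([2, 5, 7])               # [batch, length_logits, vocab]
  labels = tf.zeros([2, 8], dtype=tf.int32)  # [batch, length_labels]
  padded_logits, padded_labels = _pad_tensors_to_same_length(logits, labels)
  # Both second dimensions become max(5, 8) == 8, so the shapes are now
  # [2, 8, 7] for padded_logits and [2, 8] for padded_labels.
  return padded_logits, padded_labels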
def padded_cross_entropy_loss(logits, labels, smoothing, vocab_size):
"""Calculate cross entropy loss while ignoring padding.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
labels: Tensor of size [batch_size, length_labels]
smoothing: Label smoothing constant, used to determine the on and off values
vocab_size: int size of the vocabulary
Returns:
    The cross entropy loss and weight tensors: float32 tensors with
shape [batch_size, max(length_logits, length_labels)]
"""
with tf.name_scope("loss", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
# Calculate smoothing cross entropy
with tf.name_scope("smoothing_cross_entropy", values=[logits, labels]):
confidence = 1.0 - smoothing
low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)
soft_targets = tf.one_hot(
tf.cast(labels, tf.int32),
depth=vocab_size,
on_value=confidence,
off_value=low_confidence)
xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(
logits=logits, labels=soft_targets)
# Calculate the best (lowest) possible value of cross entropy, and
# subtract from the cross entropy loss.
normalizing_constant = -(
confidence * tf.log(confidence) + tf.to_float(vocab_size - 1) *
low_confidence * tf.log(low_confidence + 1e-20))
xentropy -= normalizing_constant
weights = tf.to_float(tf.not_equal(labels, 0))
return xentropy * weights, weights
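# Illustrative sketch (not part of the original file): the smoothed targets
# constructed above, worked through for an assumed vocab_size of 5 and
# smoothing of 0.1: the true class gets 0.9, every other class 0.1 / 4 = 0.025.
def _example_smoothed_targets():
  vocab_size, smoothing = 5, 0.1
  confidence = 1.0 - smoothing                            # 0.9
  low_confidence = (1.0 - confidence) / (vocab_size - 1)  # 0.025
  return tf.one_hot(tf.constant([2]), depth=vocab_size,
                    on_value=confidence, off_value=low_confidence)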
def _convert_to_eval_metric(metric_fn):
"""Wrap a metric fn that returns scores and weights as an eval metric fn.
The input metric_fn returns values for the current batch. The wrapper
aggregates the return values collected over all of the batches evaluated.
Args:
metric_fn: function that returns scores and weights for the current batch's
logits and predicted labels.
Returns:
function that aggregates the scores and weights from metric_fn.
"""
def problem_metric_fn(*args):
"""Returns an aggregation of the metric_fn's returned values."""
(scores, weights) = metric_fn(*args)
# The tf.metrics.mean function assures correct aggregation.
return tf.metrics.mean(scores, weights)
return problem_metric_fn
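# Illustrative sketch (not part of the original file): wrapping one of the
# metric functions defined below by hand. tf.metrics.mean keeps running sums
# of scores and weights, so the wrapped metric aggregates across eval batches.
def _example_wrapped_metric(logits, labels):
  accuracy_metric = _convert_to_eval_metric(padded_accuracy)
  # Returns the usual (value_op, update_op) pair produced by tf.metrics.mean.
  return accuracy_metric(logits, labels)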
def get_eval_metrics(logits, labels, params):
"""Return dictionary of model evaluation metrics."""
metrics = {
"accuracy": _convert_to_eval_metric(padded_accuracy)(logits, labels),
"accuracy_top5": _convert_to_eval_metric(padded_accuracy_top5)(
logits, labels),
"accuracy_per_sequence": _convert_to_eval_metric(
padded_sequence_accuracy)(logits, labels),
"neg_log_perplexity": _convert_to_eval_metric(padded_neg_log_perplexity)(
logits, labels, params["vocab_size"]),
}
if not params["use_tpu"]:
# TPU does not support tf.py_func
metrics.update({
"approx_bleu_score": _convert_to_eval_metric(
bleu_score)(logits, labels),
"rouge_2_fscore": _convert_to_eval_metric(
rouge_2_fscore)(logits, labels),
"rouge_L_fscore": _convert_to_eval_metric(
rouge_l_fscore)(logits, labels),
})
# Prefix each of the metric names with "metrics/". This allows the metric
# graphs to display under the "metrics" category in TensorBoard.
metrics = {"metrics/%s" % k: v for k, v in six.iteritems(metrics)}
return metrics
def padded_accuracy(logits, labels):
"""Percentage of times that predictions matches labels on non-0s."""
with tf.variable_scope("padded_accuracy", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
outputs = tf.to_int32(tf.argmax(logits, axis=-1))
padded_labels = tf.to_int32(labels)
return tf.to_float(tf.equal(outputs, padded_labels)), weights
def padded_accuracy_topk(logits, labels, k):
"""Percentage of times that top-k predictions matches labels on non-0s."""
with tf.variable_scope("padded_accuracy_topk", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
effective_k = tf.minimum(k, tf.shape(logits)[-1])
_, outputs = tf.nn.top_k(logits, k=effective_k)
outputs = tf.to_int32(outputs)
padded_labels = tf.to_int32(labels)
padded_labels = tf.expand_dims(padded_labels, axis=-1)
padded_labels += tf.zeros_like(outputs) # Pad to same shape.
same = tf.to_float(tf.equal(outputs, padded_labels))
same_topk = tf.reduce_sum(same, axis=-1)
return same_topk, weights
def padded_accuracy_top5(logits, labels):
return padded_accuracy_topk(logits, labels, 5)
def padded_sequence_accuracy(logits, labels):
"""Percentage of times that predictions matches labels everywhere (non-0)."""
with tf.variable_scope("padded_sequence_accuracy", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
outputs = tf.to_int32(tf.argmax(logits, axis=-1))
padded_labels = tf.to_int32(labels)
not_correct = tf.to_float(tf.not_equal(outputs, padded_labels)) * weights
axis = list(range(1, len(outputs.get_shape())))
correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis))
return correct_seq, tf.constant(1.0)
def padded_neg_log_perplexity(logits, labels, vocab_size):
"""Average log-perplexity excluding padding 0s. No smoothing."""
num, den = padded_cross_entropy_loss(logits, labels, 0, vocab_size)
return -num, den
def bleu_score(logits, labels):
"""Approximate BLEU score computation between labels and predictions.
An approximate BLEU scoring method since we do not glue word pieces or
decode the ids and tokenize the output. By default, we use ngram order of 4
and use brevity penalty. Also, this does not have beam search.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
    labels: Tensor of size [batch_size, length_labels]
  Returns:
    bleu: float32 scalar, approximate BLEU score
"""
predictions = tf.to_int32(tf.argmax(logits, axis=-1))
# TODO: Look into removing use of py_func
bleu = tf.py_func(compute_bleu, (labels, predictions), tf.float32)
return bleu, tf.constant(1.0)
def _get_ngrams_with_counter(segment, max_order):
"""Extracts all n-grams up to a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
    max_order: maximum length in tokens of the n-grams returned by this
        method.
  Returns:
    The Counter containing all n-grams up to max_order in segment
    with a count of how many times each n-gram occurred.
"""
ngram_counts = collections.Counter()
for order in xrange(1, max_order + 1):
for i in xrange(0, len(segment) - order + 1):
ngram = tuple(segment[i:i + order])
ngram_counts[ngram] += 1
return ngram_counts
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
use_bp=True):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
use_bp: boolean, whether to apply brevity penalty.
Returns:
BLEU score.
"""
reference_length = 0
translation_length = 0
bp = 1.0
geo_mean = 0
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
precisions = []
for (references, translations) in zip(reference_corpus, translation_corpus):
reference_length += len(references)
translation_length += len(translations)
ref_ngram_counts = _get_ngrams_with_counter(references, max_order)
translation_ngram_counts = _get_ngrams_with_counter(translations, max_order)
overlap = dict((ngram,
min(count, translation_ngram_counts[ngram]))
for ngram, count in ref_ngram_counts.items())
for ngram in overlap:
matches_by_order[len(ngram) - 1] += overlap[ngram]
for ngram in translation_ngram_counts:
possible_matches_by_order[len(ngram) - 1] += translation_ngram_counts[
ngram]
precisions = [0] * max_order
smooth = 1.0
for i in xrange(0, max_order):
    if possible_matches_by_order[i] > 0:
      if matches_by_order[i] > 0:
        precisions[i] = (float(matches_by_order[i]) /
                         possible_matches_by_order[i])
      else:
        # Smooth zero-match orders so the geometric mean stays non-zero.
        smooth *= 2
        precisions[i] = 1.0 / (smooth * possible_matches_by_order[i])
    else:
      precisions[i] = 0.0
if max(precisions) > 0:
p_log_sum = sum(math.log(p) for p in precisions if p)
geo_mean = math.exp(p_log_sum / max_order)
if use_bp:
ratio = translation_length / reference_length
bp = math.exp(1 - 1. / ratio) if ratio < 1.0 else 1.0
bleu = geo_mean * bp
return np.float32(bleu)
def rouge_2_fscore(logits, labels):
"""ROUGE-2 F1 score computation between labels and predictions.
This is an approximate ROUGE scoring method since we do not glue word pieces
or decode the ids and tokenize the output.
Args:
logits: tensor, model predictions
labels: tensor, gold output.
Returns:
rouge2_fscore: approx rouge-2 f1 score.
"""
predictions = tf.to_int32(tf.argmax(logits, axis=-1))
# TODO: Look into removing use of py_func
rouge_2_f_score = tf.py_func(rouge_n, (predictions, labels), tf.float32)
return rouge_2_f_score, tf.constant(1.0)
def _get_ngrams(n, text):
"""Calculates n-grams.
Args:
n: which n-grams to calculate
text: An array of tokens
Returns:
A set of n-grams
"""
ngram_set = set()
text_length = len(text)
max_index_ngram_start = text_length - n
for i in range(max_index_ngram_start + 1):
ngram_set.add(tuple(text[i:i + n]))
return ngram_set
def rouge_n(eval_sentences, ref_sentences, n=2):
"""Computes ROUGE-N f1 score of two text collections of sentences.
Source: https://www.microsoft.com/en-us/research/publication/
rouge-a-package-for-automatic-evaluation-of-summaries/
Args:
eval_sentences: Predicted sentences.
ref_sentences: Sentences from the reference set
n: Size of ngram. Defaults to 2.
Returns:
f1 score for ROUGE-N
"""
f1_scores = []
for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):
eval_ngrams = _get_ngrams(n, eval_sentence)
ref_ngrams = _get_ngrams(n, ref_sentence)
ref_count = len(ref_ngrams)
eval_count = len(eval_ngrams)
# Count the overlapping ngrams between evaluated and reference
overlapping_ngrams = eval_ngrams.intersection(ref_ngrams)
overlapping_count = len(overlapping_ngrams)
# Handle edge case. This isn't mathematically correct, but it's good enough
if eval_count == 0:
precision = 0.0
else:
precision = float(overlapping_count) / eval_count
if ref_count == 0:
recall = 0.0
else:
recall = float(overlapping_count) / ref_count
f1_scores.append(2.0 * ((precision * recall) / (precision + recall + 1e-8)))
# return overlapping_count / reference_count
return np.mean(f1_scores, dtype=np.float32)
def rouge_l_fscore(predictions, labels):
"""ROUGE scores computation between labels and predictions.
This is an approximate ROUGE scoring method since we do not glue word pieces
or decode the ids and tokenize the output.
Args:
predictions: tensor, model predictions
labels: tensor, gold output.
Returns:
rouge_l_fscore: approx rouge-l f1 score.
"""
outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
rouge_l_f_score = tf.py_func(rouge_l_sentence_level, (outputs, labels),
tf.float32)
return rouge_l_f_score, tf.constant(1.0)
def rouge_l_sentence_level(eval_sentences, ref_sentences):
"""Computes ROUGE-L (sentence level) of two collections of sentences.
Source: https://www.microsoft.com/en-us/research/publication/
rouge-a-package-for-automatic-evaluation-of-summaries/
Calculated according to:
R_lcs = LCS(X,Y)/m
P_lcs = LCS(X,Y)/n
F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)
where:
X = reference summary
Y = Candidate summary
m = length of reference summary
n = length of candidate summary
Args:
eval_sentences: The sentences that have been picked by the summarizer
ref_sentences: The sentences from the reference set
Returns:
A float: F_lcs
"""
f1_scores = []
for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):
m = float(len(ref_sentence))
n = float(len(eval_sentence))
lcs = _len_lcs(eval_sentence, ref_sentence)
f1_scores.append(_f_lcs(lcs, m, n))
return np.mean(f1_scores, dtype=np.float32)
def _len_lcs(x, y):
"""Returns the length of the Longest Common Subsequence between two seqs.
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: sequence of words
y: sequence of words
  Returns:
integer: Length of LCS between x and y
"""
table = _lcs(x, y)
n, m = len(x), len(y)
return table[n, m]
def _lcs(x, y):
"""Computes the length of the LCS between two seqs.
The implementation below uses a DP programming algorithm and runs
in O(nm) time where n = len(x) and m = len(y).
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: collection of words
y: collection of words
Returns:
Table of dictionary of coord and len lcs
"""
n, m = len(x), len(y)
table = dict()
for i in range(n + 1):
for j in range(m + 1):
if i == 0 or j == 0:
table[i, j] = 0
elif x[i - 1] == y[j - 1]:
table[i, j] = table[i - 1, j - 1] + 1
else:
table[i, j] = max(table[i - 1, j], table[i, j - 1])
return table
def _f_lcs(llcs, m, n):
"""Computes the LCS-based F-measure score.
Source: http://research.microsoft.com/en-us/um/people/cyl/download/papers/
rouge-working-note-v1.3.1.pdf
Args:
llcs: Length of LCS
m: number of words in reference summary
n: number of words in candidate summary
Returns:
Float. LCS-based F-measure score
"""
r_lcs = llcs / m
p_lcs = llcs / n
beta = p_lcs / (r_lcs + 1e-12)
num = (1 + (beta ** 2)) * r_lcs * p_lcs
denom = r_lcs + ((beta ** 2) * p_lcs)
f_lcs = num / (denom + 1e-12)
return f_lcs
|
_get_ngrams
|
Calculates n-grams.
Args:
n: which n-grams to calculate
text: An array of tokens
Returns:
A set of n-grams
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for calculating loss, accuracy, and other model metrics.
Metrics:
- Padded loss, accuracy, and negative log perplexity. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/metrics.py
- BLEU approximation. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/bleu_hook.py
- ROUGE score. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/rouge.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow.compat.v1 as tf
def _pad_tensors_to_same_length(x, y):
"""Pad x and y so that the results have the same length (second dimension)."""
with tf.name_scope("pad_to_same_length"):
x_length = tf.shape(x)[1]
y_length = tf.shape(y)[1]
max_length = tf.maximum(x_length, y_length)
x = tf.pad(x, [[0, 0], [0, max_length - x_length], [0, 0]])
y = tf.pad(y, [[0, 0], [0, max_length - y_length]])
return x, y
def padded_cross_entropy_loss(logits, labels, smoothing, vocab_size):
"""Calculate cross entropy loss while ignoring padding.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
labels: Tensor of size [batch_size, length_labels]
smoothing: Label smoothing constant, used to determine the on and off values
vocab_size: int size of the vocabulary
Returns:
    The cross entropy loss and weight tensors: float32 tensors with
shape [batch_size, max(length_logits, length_labels)]
"""
with tf.name_scope("loss", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
# Calculate smoothing cross entropy
with tf.name_scope("smoothing_cross_entropy", values=[logits, labels]):
confidence = 1.0 - smoothing
low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)
soft_targets = tf.one_hot(
tf.cast(labels, tf.int32),
depth=vocab_size,
on_value=confidence,
off_value=low_confidence)
xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(
logits=logits, labels=soft_targets)
# Calculate the best (lowest) possible value of cross entropy, and
# subtract from the cross entropy loss.
normalizing_constant = -(
confidence * tf.log(confidence) + tf.to_float(vocab_size - 1) *
low_confidence * tf.log(low_confidence + 1e-20))
xentropy -= normalizing_constant
weights = tf.to_float(tf.not_equal(labels, 0))
return xentropy * weights, weights
def _convert_to_eval_metric(metric_fn):
"""Wrap a metric fn that returns scores and weights as an eval metric fn.
The input metric_fn returns values for the current batch. The wrapper
aggregates the return values collected over all of the batches evaluated.
Args:
metric_fn: function that returns scores and weights for the current batch's
logits and predicted labels.
Returns:
function that aggregates the scores and weights from metric_fn.
"""
def problem_metric_fn(*args):
"""Returns an aggregation of the metric_fn's returned values."""
(scores, weights) = metric_fn(*args)
# The tf.metrics.mean function assures correct aggregation.
return tf.metrics.mean(scores, weights)
return problem_metric_fn
def get_eval_metrics(logits, labels, params):
"""Return dictionary of model evaluation metrics."""
metrics = {
"accuracy": _convert_to_eval_metric(padded_accuracy)(logits, labels),
"accuracy_top5": _convert_to_eval_metric(padded_accuracy_top5)(
logits, labels),
"accuracy_per_sequence": _convert_to_eval_metric(
padded_sequence_accuracy)(logits, labels),
"neg_log_perplexity": _convert_to_eval_metric(padded_neg_log_perplexity)(
logits, labels, params["vocab_size"]),
}
if not params["use_tpu"]:
# TPU does not support tf.py_func
metrics.update({
"approx_bleu_score": _convert_to_eval_metric(
bleu_score)(logits, labels),
"rouge_2_fscore": _convert_to_eval_metric(
rouge_2_fscore)(logits, labels),
"rouge_L_fscore": _convert_to_eval_metric(
rouge_l_fscore)(logits, labels),
})
# Prefix each of the metric names with "metrics/". This allows the metric
# graphs to display under the "metrics" category in TensorBoard.
metrics = {"metrics/%s" % k: v for k, v in six.iteritems(metrics)}
return metrics
def padded_accuracy(logits, labels):
"""Percentage of times that predictions matches labels on non-0s."""
with tf.variable_scope("padded_accuracy", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
outputs = tf.to_int32(tf.argmax(logits, axis=-1))
padded_labels = tf.to_int32(labels)
return tf.to_float(tf.equal(outputs, padded_labels)), weights
def padded_accuracy_topk(logits, labels, k):
"""Percentage of times that top-k predictions matches labels on non-0s."""
with tf.variable_scope("padded_accuracy_topk", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
effective_k = tf.minimum(k, tf.shape(logits)[-1])
_, outputs = tf.nn.top_k(logits, k=effective_k)
outputs = tf.to_int32(outputs)
padded_labels = tf.to_int32(labels)
padded_labels = tf.expand_dims(padded_labels, axis=-1)
padded_labels += tf.zeros_like(outputs) # Pad to same shape.
same = tf.to_float(tf.equal(outputs, padded_labels))
same_topk = tf.reduce_sum(same, axis=-1)
return same_topk, weights
def padded_accuracy_top5(logits, labels):
return padded_accuracy_topk(logits, labels, 5)
def padded_sequence_accuracy(logits, labels):
"""Percentage of times that predictions matches labels everywhere (non-0)."""
with tf.variable_scope("padded_sequence_accuracy", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
outputs = tf.to_int32(tf.argmax(logits, axis=-1))
padded_labels = tf.to_int32(labels)
not_correct = tf.to_float(tf.not_equal(outputs, padded_labels)) * weights
axis = list(range(1, len(outputs.get_shape())))
correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis))
return correct_seq, tf.constant(1.0)
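# Illustrative sketch (not part of the original file): padded_sequence_accuracy
# credits a sequence only if every non-padding position matches. With the
# made-up logits below, sequence 0 matches everywhere while sequence 1 differs
# at one non-padding position, so the per-sequence scores are [1.0, 0.0].
def _example_sequence_accuracy():
  logits = tf.one_hot([[1, 2, 0], [1, 3, 0]], depth=4)  # argmax recovers the ids
  labels = tf.constant([[1, 2, 0], [1, 2, 0]])
  correct_seq, _ = padded_sequence_accuracy(logits, labels)
  return correct_seq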
def padded_neg_log_perplexity(logits, labels, vocab_size):
"""Average log-perplexity excluding padding 0s. No smoothing."""
num, den = padded_cross_entropy_loss(logits, labels, 0, vocab_size)
return -num, den
def bleu_score(logits, labels):
"""Approximate BLEU score computation between labels and predictions.
An approximate BLEU scoring method since we do not glue word pieces or
decode the ids and tokenize the output. By default, we use ngram order of 4
and use brevity penalty. Also, this does not have beam search.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
    labels: Tensor of size [batch_size, length_labels]
  Returns:
    bleu: float32 scalar, approximate BLEU score
"""
predictions = tf.to_int32(tf.argmax(logits, axis=-1))
# TODO: Look into removing use of py_func
bleu = tf.py_func(compute_bleu, (labels, predictions), tf.float32)
return bleu, tf.constant(1.0)
def _get_ngrams_with_counter(segment, max_order):
"""Extracts all n-grams up to a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
    max_order: maximum length in tokens of the n-grams returned by this
        method.
  Returns:
    The Counter containing all n-grams up to max_order in segment
    with a count of how many times each n-gram occurred.
"""
ngram_counts = collections.Counter()
for order in xrange(1, max_order + 1):
for i in xrange(0, len(segment) - order + 1):
ngram = tuple(segment[i:i + order])
ngram_counts[ngram] += 1
return ngram_counts
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
use_bp=True):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
use_bp: boolean, whether to apply brevity penalty.
Returns:
BLEU score.
"""
reference_length = 0
translation_length = 0
bp = 1.0
geo_mean = 0
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
precisions = []
for (references, translations) in zip(reference_corpus, translation_corpus):
reference_length += len(references)
translation_length += len(translations)
ref_ngram_counts = _get_ngrams_with_counter(references, max_order)
translation_ngram_counts = _get_ngrams_with_counter(translations, max_order)
overlap = dict((ngram,
min(count, translation_ngram_counts[ngram]))
for ngram, count in ref_ngram_counts.items())
for ngram in overlap:
matches_by_order[len(ngram) - 1] += overlap[ngram]
for ngram in translation_ngram_counts:
possible_matches_by_order[len(ngram) - 1] += translation_ngram_counts[
ngram]
precisions = [0] * max_order
smooth = 1.0
for i in xrange(0, max_order):
    if possible_matches_by_order[i] > 0:
      if matches_by_order[i] > 0:
        precisions[i] = (float(matches_by_order[i]) /
                         possible_matches_by_order[i])
      else:
        # Smooth zero-match orders so the geometric mean stays non-zero.
        smooth *= 2
        precisions[i] = 1.0 / (smooth * possible_matches_by_order[i])
    else:
      precisions[i] = 0.0
if max(precisions) > 0:
p_log_sum = sum(math.log(p) for p in precisions if p)
geo_mean = math.exp(p_log_sum / max_order)
if use_bp:
ratio = translation_length / reference_length
bp = math.exp(1 - 1. / ratio) if ratio < 1.0 else 1.0
bleu = geo_mean * bp
return np.float32(bleu)
def rouge_2_fscore(logits, labels):
"""ROUGE-2 F1 score computation between labels and predictions.
This is an approximate ROUGE scoring method since we do not glue word pieces
or decode the ids and tokenize the output.
Args:
logits: tensor, model predictions
labels: tensor, gold output.
Returns:
rouge2_fscore: approx rouge-2 f1 score.
"""
predictions = tf.to_int32(tf.argmax(logits, axis=-1))
# TODO: Look into removing use of py_func
rouge_2_f_score = tf.py_func(rouge_n, (predictions, labels), tf.float32)
return rouge_2_f_score, tf.constant(1.0)
# MASKED: _get_ngrams function (lines 315-330)
def rouge_n(eval_sentences, ref_sentences, n=2):
"""Computes ROUGE-N f1 score of two text collections of sentences.
Source: https://www.microsoft.com/en-us/research/publication/
rouge-a-package-for-automatic-evaluation-of-summaries/
Args:
eval_sentences: Predicted sentences.
ref_sentences: Sentences from the reference set
n: Size of ngram. Defaults to 2.
Returns:
f1 score for ROUGE-N
"""
f1_scores = []
for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):
eval_ngrams = _get_ngrams(n, eval_sentence)
ref_ngrams = _get_ngrams(n, ref_sentence)
ref_count = len(ref_ngrams)
eval_count = len(eval_ngrams)
# Count the overlapping ngrams between evaluated and reference
overlapping_ngrams = eval_ngrams.intersection(ref_ngrams)
overlapping_count = len(overlapping_ngrams)
# Handle edge case. This isn't mathematically correct, but it's good enough
if eval_count == 0:
precision = 0.0
else:
precision = float(overlapping_count) / eval_count
if ref_count == 0:
recall = 0.0
else:
recall = float(overlapping_count) / ref_count
f1_scores.append(2.0 * ((precision * recall) / (precision + recall + 1e-8)))
# return overlapping_count / reference_count
return np.mean(f1_scores, dtype=np.float32)
def rouge_l_fscore(predictions, labels):
"""ROUGE scores computation between labels and predictions.
This is an approximate ROUGE scoring method since we do not glue word pieces
or decode the ids and tokenize the output.
Args:
predictions: tensor, model predictions
labels: tensor, gold output.
Returns:
rouge_l_fscore: approx rouge-l f1 score.
"""
outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
rouge_l_f_score = tf.py_func(rouge_l_sentence_level, (outputs, labels),
tf.float32)
return rouge_l_f_score, tf.constant(1.0)
def rouge_l_sentence_level(eval_sentences, ref_sentences):
"""Computes ROUGE-L (sentence level) of two collections of sentences.
Source: https://www.microsoft.com/en-us/research/publication/
rouge-a-package-for-automatic-evaluation-of-summaries/
Calculated according to:
R_lcs = LCS(X,Y)/m
P_lcs = LCS(X,Y)/n
F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)
where:
X = reference summary
Y = Candidate summary
m = length of reference summary
n = length of candidate summary
Args:
eval_sentences: The sentences that have been picked by the summarizer
ref_sentences: The sentences from the reference set
Returns:
A float: F_lcs
"""
f1_scores = []
for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):
m = float(len(ref_sentence))
n = float(len(eval_sentence))
lcs = _len_lcs(eval_sentence, ref_sentence)
f1_scores.append(_f_lcs(lcs, m, n))
return np.mean(f1_scores, dtype=np.float32)
def _len_lcs(x, y):
"""Returns the length of the Longest Common Subsequence between two seqs.
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: sequence of words
y: sequence of words
  Returns:
integer: Length of LCS between x and y
"""
table = _lcs(x, y)
n, m = len(x), len(y)
return table[n, m]
def _lcs(x, y):
"""Computes the length of the LCS between two seqs.
The implementation below uses a DP programming algorithm and runs
in O(nm) time where n = len(x) and m = len(y).
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: collection of words
y: collection of words
Returns:
Table of dictionary of coord and len lcs
"""
n, m = len(x), len(y)
table = dict()
for i in range(n + 1):
for j in range(m + 1):
if i == 0 or j == 0:
table[i, j] = 0
elif x[i - 1] == y[j - 1]:
table[i, j] = table[i - 1, j - 1] + 1
else:
table[i, j] = max(table[i - 1, j], table[i, j - 1])
return table
def _f_lcs(llcs, m, n):
"""Computes the LCS-based F-measure score.
Source: http://research.microsoft.com/en-us/um/people/cyl/download/papers/
rouge-working-note-v1.3.1.pdf
Args:
llcs: Length of LCS
m: number of words in reference summary
n: number of words in candidate summary
Returns:
Float. LCS-based F-measure score
"""
r_lcs = llcs / m
p_lcs = llcs / n
beta = p_lcs / (r_lcs + 1e-12)
num = (1 + (beta ** 2)) * r_lcs * p_lcs
denom = r_lcs + ((beta ** 2) * p_lcs)
f_lcs = num / (denom + 1e-12)
return f_lcs
|
def _get_ngrams(n, text):
"""Calculates n-grams.
Args:
n: which n-grams to calculate
text: An array of tokens
Returns:
A set of n-grams
"""
ngram_set = set()
text_length = len(text)
max_index_ngram_start = text_length - n
for i in range(max_index_ngram_start + 1):
ngram_set.add(tuple(text[i:i + n]))
return ngram_set
| 315 | 330 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for calculating loss, accuracy, and other model metrics.
Metrics:
- Padded loss, accuracy, and negative log perplexity. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/metrics.py
- BLEU approximation. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/bleu_hook.py
- ROUGE score. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/rouge.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow.compat.v1 as tf
def _pad_tensors_to_same_length(x, y):
"""Pad x and y so that the results have the same length (second dimension)."""
with tf.name_scope("pad_to_same_length"):
x_length = tf.shape(x)[1]
y_length = tf.shape(y)[1]
max_length = tf.maximum(x_length, y_length)
x = tf.pad(x, [[0, 0], [0, max_length - x_length], [0, 0]])
y = tf.pad(y, [[0, 0], [0, max_length - y_length]])
return x, y
def padded_cross_entropy_loss(logits, labels, smoothing, vocab_size):
"""Calculate cross entropy loss while ignoring padding.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
labels: Tensor of size [batch_size, length_labels]
smoothing: Label smoothing constant, used to determine the on and off values
vocab_size: int size of the vocabulary
Returns:
    The cross entropy loss and weight tensors: float32 tensors with
shape [batch_size, max(length_logits, length_labels)]
"""
with tf.name_scope("loss", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
# Calculate smoothing cross entropy
with tf.name_scope("smoothing_cross_entropy", values=[logits, labels]):
confidence = 1.0 - smoothing
low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)
soft_targets = tf.one_hot(
tf.cast(labels, tf.int32),
depth=vocab_size,
on_value=confidence,
off_value=low_confidence)
xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(
logits=logits, labels=soft_targets)
# Calculate the best (lowest) possible value of cross entropy, and
# subtract from the cross entropy loss.
normalizing_constant = -(
confidence * tf.log(confidence) + tf.to_float(vocab_size - 1) *
low_confidence * tf.log(low_confidence + 1e-20))
xentropy -= normalizing_constant
weights = tf.to_float(tf.not_equal(labels, 0))
return xentropy * weights, weights
def _convert_to_eval_metric(metric_fn):
"""Wrap a metric fn that returns scores and weights as an eval metric fn.
The input metric_fn returns values for the current batch. The wrapper
aggregates the return values collected over all of the batches evaluated.
Args:
metric_fn: function that returns scores and weights for the current batch's
logits and predicted labels.
Returns:
function that aggregates the scores and weights from metric_fn.
"""
def problem_metric_fn(*args):
"""Returns an aggregation of the metric_fn's returned values."""
(scores, weights) = metric_fn(*args)
# The tf.metrics.mean function assures correct aggregation.
return tf.metrics.mean(scores, weights)
return problem_metric_fn
def get_eval_metrics(logits, labels, params):
"""Return dictionary of model evaluation metrics."""
metrics = {
"accuracy": _convert_to_eval_metric(padded_accuracy)(logits, labels),
"accuracy_top5": _convert_to_eval_metric(padded_accuracy_top5)(
logits, labels),
"accuracy_per_sequence": _convert_to_eval_metric(
padded_sequence_accuracy)(logits, labels),
"neg_log_perplexity": _convert_to_eval_metric(padded_neg_log_perplexity)(
logits, labels, params["vocab_size"]),
}
if not params["use_tpu"]:
# TPU does not support tf.py_func
metrics.update({
"approx_bleu_score": _convert_to_eval_metric(
bleu_score)(logits, labels),
"rouge_2_fscore": _convert_to_eval_metric(
rouge_2_fscore)(logits, labels),
"rouge_L_fscore": _convert_to_eval_metric(
rouge_l_fscore)(logits, labels),
})
# Prefix each of the metric names with "metrics/". This allows the metric
# graphs to display under the "metrics" category in TensorBoard.
metrics = {"metrics/%s" % k: v for k, v in six.iteritems(metrics)}
return metrics
def padded_accuracy(logits, labels):
"""Percentage of times that predictions matches labels on non-0s."""
with tf.variable_scope("padded_accuracy", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
outputs = tf.to_int32(tf.argmax(logits, axis=-1))
padded_labels = tf.to_int32(labels)
return tf.to_float(tf.equal(outputs, padded_labels)), weights
def padded_accuracy_topk(logits, labels, k):
"""Percentage of times that top-k predictions matches labels on non-0s."""
with tf.variable_scope("padded_accuracy_topk", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
effective_k = tf.minimum(k, tf.shape(logits)[-1])
_, outputs = tf.nn.top_k(logits, k=effective_k)
outputs = tf.to_int32(outputs)
padded_labels = tf.to_int32(labels)
padded_labels = tf.expand_dims(padded_labels, axis=-1)
padded_labels += tf.zeros_like(outputs) # Pad to same shape.
same = tf.to_float(tf.equal(outputs, padded_labels))
same_topk = tf.reduce_sum(same, axis=-1)
return same_topk, weights
def padded_accuracy_top5(logits, labels):
return padded_accuracy_topk(logits, labels, 5)
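# Illustrative sketch (not part of the original file): with the assumed logits
# below, the true class only has the second-highest score, so padded_accuracy
# scores 0 for that position while padded_accuracy_topk with k=2 scores 1.
def _example_topk_accuracy():
  logits = tf.constant([[[0.1, 0.7, 0.2, 0.0]]])  # argmax is class 1
  labels = tf.constant([[2]])                     # true class is 2
  top1, _ = padded_accuracy(logits, labels)
  top2, _ = padded_accuracy_topk(logits, labels, k=2)
  return top1, top2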
def padded_sequence_accuracy(logits, labels):
"""Percentage of times that predictions matches labels everywhere (non-0)."""
with tf.variable_scope("padded_sequence_accuracy", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
outputs = tf.to_int32(tf.argmax(logits, axis=-1))
padded_labels = tf.to_int32(labels)
not_correct = tf.to_float(tf.not_equal(outputs, padded_labels)) * weights
axis = list(range(1, len(outputs.get_shape())))
correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis))
return correct_seq, tf.constant(1.0)
def padded_neg_log_perplexity(logits, labels, vocab_size):
"""Average log-perplexity excluding padding 0s. No smoothing."""
num, den = padded_cross_entropy_loss(logits, labels, 0, vocab_size)
return -num, den
def bleu_score(logits, labels):
"""Approximate BLEU score computation between labels and predictions.
An approximate BLEU scoring method since we do not glue word pieces or
decode the ids and tokenize the output. By default, we use ngram order of 4
and use brevity penalty. Also, this does not have beam search.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
    labels: Tensor of size [batch_size, length_labels]
  Returns:
    bleu: float32 scalar, approximate BLEU score
"""
predictions = tf.to_int32(tf.argmax(logits, axis=-1))
# TODO: Look into removing use of py_func
bleu = tf.py_func(compute_bleu, (labels, predictions), tf.float32)
return bleu, tf.constant(1.0)
def _get_ngrams_with_counter(segment, max_order):
"""Extracts all n-grams up to a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
    max_order: maximum length in tokens of the n-grams returned by this
        method.
  Returns:
    The Counter containing all n-grams up to max_order in segment
    with a count of how many times each n-gram occurred.
"""
ngram_counts = collections.Counter()
for order in xrange(1, max_order + 1):
for i in xrange(0, len(segment) - order + 1):
ngram = tuple(segment[i:i + order])
ngram_counts[ngram] += 1
return ngram_counts
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
use_bp=True):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
use_bp: boolean, whether to apply brevity penalty.
Returns:
BLEU score.
"""
reference_length = 0
translation_length = 0
bp = 1.0
geo_mean = 0
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
precisions = []
for (references, translations) in zip(reference_corpus, translation_corpus):
reference_length += len(references)
translation_length += len(translations)
ref_ngram_counts = _get_ngrams_with_counter(references, max_order)
translation_ngram_counts = _get_ngrams_with_counter(translations, max_order)
overlap = dict((ngram,
min(count, translation_ngram_counts[ngram]))
for ngram, count in ref_ngram_counts.items())
for ngram in overlap:
matches_by_order[len(ngram) - 1] += overlap[ngram]
for ngram in translation_ngram_counts:
possible_matches_by_order[len(ngram) - 1] += translation_ngram_counts[
ngram]
precisions = [0] * max_order
smooth = 1.0
for i in xrange(0, max_order):
    if possible_matches_by_order[i] > 0:
      if matches_by_order[i] > 0:
        precisions[i] = (float(matches_by_order[i]) /
                         possible_matches_by_order[i])
      else:
        # Smooth zero-match orders so the geometric mean stays non-zero.
        smooth *= 2
        precisions[i] = 1.0 / (smooth * possible_matches_by_order[i])
    else:
      precisions[i] = 0.0
if max(precisions) > 0:
p_log_sum = sum(math.log(p) for p in precisions if p)
geo_mean = math.exp(p_log_sum / max_order)
if use_bp:
ratio = translation_length / reference_length
bp = math.exp(1 - 1. / ratio) if ratio < 1.0 else 1.0
bleu = geo_mean * bp
return np.float32(bleu)
def rouge_2_fscore(logits, labels):
"""ROUGE-2 F1 score computation between labels and predictions.
This is an approximate ROUGE scoring method since we do not glue word pieces
or decode the ids and tokenize the output.
Args:
logits: tensor, model predictions
labels: tensor, gold output.
Returns:
rouge2_fscore: approx rouge-2 f1 score.
"""
predictions = tf.to_int32(tf.argmax(logits, axis=-1))
# TODO: Look into removing use of py_func
rouge_2_f_score = tf.py_func(rouge_n, (predictions, labels), tf.float32)
return rouge_2_f_score, tf.constant(1.0)
def _get_ngrams(n, text):
"""Calculates n-grams.
Args:
n: which n-grams to calculate
text: An array of tokens
Returns:
A set of n-grams
"""
ngram_set = set()
text_length = len(text)
max_index_ngram_start = text_length - n
for i in range(max_index_ngram_start + 1):
ngram_set.add(tuple(text[i:i + n]))
return ngram_set
def rouge_n(eval_sentences, ref_sentences, n=2):
"""Computes ROUGE-N f1 score of two text collections of sentences.
Source: https://www.microsoft.com/en-us/research/publication/
rouge-a-package-for-automatic-evaluation-of-summaries/
Args:
eval_sentences: Predicted sentences.
ref_sentences: Sentences from the reference set
n: Size of ngram. Defaults to 2.
Returns:
f1 score for ROUGE-N
"""
f1_scores = []
for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):
eval_ngrams = _get_ngrams(n, eval_sentence)
ref_ngrams = _get_ngrams(n, ref_sentence)
ref_count = len(ref_ngrams)
eval_count = len(eval_ngrams)
# Count the overlapping ngrams between evaluated and reference
overlapping_ngrams = eval_ngrams.intersection(ref_ngrams)
overlapping_count = len(overlapping_ngrams)
# Handle edge case. This isn't mathematically correct, but it's good enough
if eval_count == 0:
precision = 0.0
else:
precision = float(overlapping_count) / eval_count
if ref_count == 0:
recall = 0.0
else:
recall = float(overlapping_count) / ref_count
f1_scores.append(2.0 * ((precision * recall) / (precision + recall + 1e-8)))
# return overlapping_count / reference_count
return np.mean(f1_scores, dtype=np.float32)
def rouge_l_fscore(predictions, labels):
"""ROUGE scores computation between labels and predictions.
This is an approximate ROUGE scoring method since we do not glue word pieces
or decode the ids and tokenize the output.
Args:
predictions: tensor, model predictions
labels: tensor, gold output.
Returns:
rouge_l_fscore: approx rouge-l f1 score.
"""
outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
rouge_l_f_score = tf.py_func(rouge_l_sentence_level, (outputs, labels),
tf.float32)
return rouge_l_f_score, tf.constant(1.0)
def rouge_l_sentence_level(eval_sentences, ref_sentences):
"""Computes ROUGE-L (sentence level) of two collections of sentences.
Source: https://www.microsoft.com/en-us/research/publication/
rouge-a-package-for-automatic-evaluation-of-summaries/
Calculated according to:
R_lcs = LCS(X,Y)/m
P_lcs = LCS(X,Y)/n
F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)
where:
X = reference summary
Y = Candidate summary
m = length of reference summary
n = length of candidate summary
Args:
eval_sentences: The sentences that have been picked by the summarizer
ref_sentences: The sentences from the reference set
Returns:
A float: F_lcs
"""
f1_scores = []
for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):
m = float(len(ref_sentence))
n = float(len(eval_sentence))
lcs = _len_lcs(eval_sentence, ref_sentence)
f1_scores.append(_f_lcs(lcs, m, n))
return np.mean(f1_scores, dtype=np.float32)
def _len_lcs(x, y):
"""Returns the length of the Longest Common Subsequence between two seqs.
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: sequence of words
y: sequence of words
  Returns:
integer: Length of LCS between x and y
"""
table = _lcs(x, y)
n, m = len(x), len(y)
return table[n, m]
def _lcs(x, y):
"""Computes the length of the LCS between two seqs.
The implementation below uses a DP programming algorithm and runs
in O(nm) time where n = len(x) and m = len(y).
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: collection of words
y: collection of words
Returns:
Table of dictionary of coord and len lcs
"""
n, m = len(x), len(y)
table = dict()
for i in range(n + 1):
for j in range(m + 1):
if i == 0 or j == 0:
table[i, j] = 0
elif x[i - 1] == y[j - 1]:
table[i, j] = table[i - 1, j - 1] + 1
else:
table[i, j] = max(table[i - 1, j], table[i, j - 1])
return table
def _f_lcs(llcs, m, n):
"""Computes the LCS-based F-measure score.
Source: http://research.microsoft.com/en-us/um/people/cyl/download/papers/
rouge-working-note-v1.3.1.pdf
Args:
llcs: Length of LCS
m: number of words in reference summary
n: number of words in candidate summary
Returns:
Float. LCS-based F-measure score
"""
r_lcs = llcs / m
p_lcs = llcs / n
beta = p_lcs / (r_lcs + 1e-12)
num = (1 + (beta ** 2)) * r_lcs * p_lcs
denom = r_lcs + ((beta ** 2) * p_lcs)
f_lcs = num / (denom + 1e-12)
return f_lcs
|
rouge_l_fscore
|
ROUGE scores computation between labels and predictions.
This is an approximate ROUGE scoring method since we do not glue word pieces
or decode the ids and tokenize the output.
Args:
predictions: tensor, model predictions
labels: tensor, gold output.
Returns:
rouge_l_fscore: approx rouge-l f1 score.
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for calculating loss, accuracy, and other model metrics.
Metrics:
- Padded loss, accuracy, and negative log perplexity. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/metrics.py
- BLEU approximation. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/bleu_hook.py
- ROUGE score. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/rouge.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow.compat.v1 as tf
def _pad_tensors_to_same_length(x, y):
"""Pad x and y so that the results have the same length (second dimension)."""
with tf.name_scope("pad_to_same_length"):
x_length = tf.shape(x)[1]
y_length = tf.shape(y)[1]
max_length = tf.maximum(x_length, y_length)
x = tf.pad(x, [[0, 0], [0, max_length - x_length], [0, 0]])
y = tf.pad(y, [[0, 0], [0, max_length - y_length]])
return x, y
def padded_cross_entropy_loss(logits, labels, smoothing, vocab_size):
"""Calculate cross entropy loss while ignoring padding.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
labels: Tensor of size [batch_size, length_labels]
smoothing: Label smoothing constant, used to determine the on and off values
vocab_size: int size of the vocabulary
Returns:
    The cross entropy loss and weight tensors: float32 tensors with
shape [batch_size, max(length_logits, length_labels)]
"""
with tf.name_scope("loss", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
# Calculate smoothing cross entropy
with tf.name_scope("smoothing_cross_entropy", values=[logits, labels]):
confidence = 1.0 - smoothing
low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)
soft_targets = tf.one_hot(
tf.cast(labels, tf.int32),
depth=vocab_size,
on_value=confidence,
off_value=low_confidence)
xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(
logits=logits, labels=soft_targets)
# Calculate the best (lowest) possible value of cross entropy, and
# subtract from the cross entropy loss.
normalizing_constant = -(
confidence * tf.log(confidence) + tf.to_float(vocab_size - 1) *
low_confidence * tf.log(low_confidence + 1e-20))
xentropy -= normalizing_constant
weights = tf.to_float(tf.not_equal(labels, 0))
return xentropy * weights, weights
def _convert_to_eval_metric(metric_fn):
"""Wrap a metric fn that returns scores and weights as an eval metric fn.
The input metric_fn returns values for the current batch. The wrapper
aggregates the return values collected over all of the batches evaluated.
Args:
metric_fn: function that returns scores and weights for the current batch's
logits and predicted labels.
Returns:
function that aggregates the scores and weights from metric_fn.
"""
def problem_metric_fn(*args):
"""Returns an aggregation of the metric_fn's returned values."""
(scores, weights) = metric_fn(*args)
# The tf.metrics.mean function assures correct aggregation.
return tf.metrics.mean(scores, weights)
return problem_metric_fn
def get_eval_metrics(logits, labels, params):
"""Return dictionary of model evaluation metrics."""
metrics = {
"accuracy": _convert_to_eval_metric(padded_accuracy)(logits, labels),
"accuracy_top5": _convert_to_eval_metric(padded_accuracy_top5)(
logits, labels),
"accuracy_per_sequence": _convert_to_eval_metric(
padded_sequence_accuracy)(logits, labels),
"neg_log_perplexity": _convert_to_eval_metric(padded_neg_log_perplexity)(
logits, labels, params["vocab_size"]),
}
if not params["use_tpu"]:
# TPU does not support tf.py_func
metrics.update({
"approx_bleu_score": _convert_to_eval_metric(
bleu_score)(logits, labels),
"rouge_2_fscore": _convert_to_eval_metric(
rouge_2_fscore)(logits, labels),
"rouge_L_fscore": _convert_to_eval_metric(
rouge_l_fscore)(logits, labels),
})
# Prefix each of the metric names with "metrics/". This allows the metric
# graphs to display under the "metrics" category in TensorBoard.
metrics = {"metrics/%s" % k: v for k, v in six.iteritems(metrics)}
return metrics
def padded_accuracy(logits, labels):
"""Percentage of times that predictions matches labels on non-0s."""
with tf.variable_scope("padded_accuracy", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
outputs = tf.to_int32(tf.argmax(logits, axis=-1))
padded_labels = tf.to_int32(labels)
return tf.to_float(tf.equal(outputs, padded_labels)), weights
def padded_accuracy_topk(logits, labels, k):
"""Percentage of times that top-k predictions matches labels on non-0s."""
with tf.variable_scope("padded_accuracy_topk", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
effective_k = tf.minimum(k, tf.shape(logits)[-1])
_, outputs = tf.nn.top_k(logits, k=effective_k)
outputs = tf.to_int32(outputs)
padded_labels = tf.to_int32(labels)
padded_labels = tf.expand_dims(padded_labels, axis=-1)
padded_labels += tf.zeros_like(outputs) # Pad to same shape.
same = tf.to_float(tf.equal(outputs, padded_labels))
same_topk = tf.reduce_sum(same, axis=-1)
return same_topk, weights
def padded_accuracy_top5(logits, labels):
return padded_accuracy_topk(logits, labels, 5)
def padded_sequence_accuracy(logits, labels):
"""Percentage of times that predictions matches labels everywhere (non-0)."""
with tf.variable_scope("padded_sequence_accuracy", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
outputs = tf.to_int32(tf.argmax(logits, axis=-1))
padded_labels = tf.to_int32(labels)
not_correct = tf.to_float(tf.not_equal(outputs, padded_labels)) * weights
axis = list(range(1, len(outputs.get_shape())))
correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis))
return correct_seq, tf.constant(1.0)
def padded_neg_log_perplexity(logits, labels, vocab_size):
"""Average log-perplexity excluding padding 0s. No smoothing."""
num, den = padded_cross_entropy_loss(logits, labels, 0, vocab_size)
return -num, den
def bleu_score(logits, labels):
"""Approximate BLEU score computation between labels and predictions.
An approximate BLEU scoring method since we do not glue word pieces or
decode the ids and tokenize the output. By default, we use ngram order of 4
and use brevity penalty. Also, this does not have beam search.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
    labels: Tensor of size [batch_size, length_labels]
Returns:
    bleu: float32 tensor, approximate BLEU score
"""
predictions = tf.to_int32(tf.argmax(logits, axis=-1))
# TODO: Look into removing use of py_func
bleu = tf.py_func(compute_bleu, (labels, predictions), tf.float32)
return bleu, tf.constant(1.0)
def _get_ngrams_with_counter(segment, max_order):
"""Extracts all n-grams up to a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
max_order: maximum length in tokens of the n-grams returned by this
      method.
Returns:
    The Counter containing all n-grams up to max_order in segment
with a count of how many times each n-gram occurred.
"""
ngram_counts = collections.Counter()
for order in xrange(1, max_order + 1):
for i in xrange(0, len(segment) - order + 1):
ngram = tuple(segment[i:i + order])
ngram_counts[ngram] += 1
return ngram_counts
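# Illustrative sketch only; this example helper is not part of the original
# module and exists purely to show what _get_ngrams_with_counter returns.
def _example_ngram_counts():
  """Hypothetical usage example for _get_ngrams_with_counter.

  For the token-id segment [1, 2, 1] with max_order=2 the result is
  Counter({(1,): 2, (2,): 1, (1, 2): 1, (2, 1): 1}).
  """
  return _get_ngrams_with_counter([1, 2, 1], max_order=2)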
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
use_bp=True):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
use_bp: boolean, whether to apply brevity penalty.
Returns:
BLEU score.
"""
reference_length = 0
translation_length = 0
bp = 1.0
geo_mean = 0
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
precisions = []
for (references, translations) in zip(reference_corpus, translation_corpus):
reference_length += len(references)
translation_length += len(translations)
ref_ngram_counts = _get_ngrams_with_counter(references, max_order)
translation_ngram_counts = _get_ngrams_with_counter(translations, max_order)
overlap = dict((ngram,
min(count, translation_ngram_counts[ngram]))
for ngram, count in ref_ngram_counts.items())
for ngram in overlap:
matches_by_order[len(ngram) - 1] += overlap[ngram]
for ngram in translation_ngram_counts:
possible_matches_by_order[len(ngram) - 1] += translation_ngram_counts[
ngram]
precisions = [0] * max_order
smooth = 1.0
for i in xrange(0, max_order):
if possible_matches_by_order[i] > 0:
precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[i]
if matches_by_order[i] > 0:
precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[
i]
else:
smooth *= 2
precisions[i] = 1.0 / (smooth * possible_matches_by_order[i])
else:
precisions[i] = 0.0
if max(precisions) > 0:
p_log_sum = sum(math.log(p) for p in precisions if p)
geo_mean = math.exp(p_log_sum / max_order)
if use_bp:
ratio = translation_length / reference_length
bp = math.exp(1 - 1. / ratio) if ratio < 1.0 else 1.0
bleu = geo_mean * bp
return np.float32(bleu)
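# Illustrative sketch only; this example helper is not part of the original
# module. It shows compute_bleu on raw token-id lists, which is how
# bleu_score feeds it through tf.py_func above.
def _example_compute_bleu():
  """Hypothetical usage example for compute_bleu.

  An identical reference and translation should score (approximately) 1.0;
  dropping or reordering tokens lowers the n-gram precisions and hence BLEU.
  """
  reference_corpus = [[3, 7, 7, 9, 2]]
  translation_corpus = [[3, 7, 7, 9, 2]]
  return compute_bleu(reference_corpus, translation_corpus,
                      max_order=4, use_bp=True)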
def rouge_2_fscore(logits, labels):
"""ROUGE-2 F1 score computation between labels and predictions.
This is an approximate ROUGE scoring method since we do not glue word pieces
or decode the ids and tokenize the output.
Args:
logits: tensor, model predictions
labels: tensor, gold output.
Returns:
rouge2_fscore: approx rouge-2 f1 score.
"""
predictions = tf.to_int32(tf.argmax(logits, axis=-1))
# TODO: Look into removing use of py_func
rouge_2_f_score = tf.py_func(rouge_n, (predictions, labels), tf.float32)
return rouge_2_f_score, tf.constant(1.0)
def _get_ngrams(n, text):
"""Calculates n-grams.
Args:
n: which n-grams to calculate
text: An array of tokens
Returns:
A set of n-grams
"""
ngram_set = set()
text_length = len(text)
max_index_ngram_start = text_length - n
for i in range(max_index_ngram_start + 1):
ngram_set.add(tuple(text[i:i + n]))
return ngram_set
def rouge_n(eval_sentences, ref_sentences, n=2):
"""Computes ROUGE-N f1 score of two text collections of sentences.
Source: https://www.microsoft.com/en-us/research/publication/
rouge-a-package-for-automatic-evaluation-of-summaries/
Args:
eval_sentences: Predicted sentences.
ref_sentences: Sentences from the reference set
n: Size of ngram. Defaults to 2.
Returns:
f1 score for ROUGE-N
"""
f1_scores = []
for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):
eval_ngrams = _get_ngrams(n, eval_sentence)
ref_ngrams = _get_ngrams(n, ref_sentence)
ref_count = len(ref_ngrams)
eval_count = len(eval_ngrams)
# Count the overlapping ngrams between evaluated and reference
overlapping_ngrams = eval_ngrams.intersection(ref_ngrams)
overlapping_count = len(overlapping_ngrams)
# Handle edge case. This isn't mathematically correct, but it's good enough
if eval_count == 0:
precision = 0.0
else:
precision = float(overlapping_count) / eval_count
if ref_count == 0:
recall = 0.0
else:
recall = float(overlapping_count) / ref_count
f1_scores.append(2.0 * ((precision * recall) / (precision + recall + 1e-8)))
# return overlapping_count / reference_count
return np.mean(f1_scores, dtype=np.float32)
# MASKED: rouge_l_fscore function (lines 373-389)
def rouge_l_sentence_level(eval_sentences, ref_sentences):
"""Computes ROUGE-L (sentence level) of two collections of sentences.
Source: https://www.microsoft.com/en-us/research/publication/
rouge-a-package-for-automatic-evaluation-of-summaries/
Calculated according to:
R_lcs = LCS(X,Y)/m
P_lcs = LCS(X,Y)/n
F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)
where:
X = reference summary
Y = Candidate summary
m = length of reference summary
n = length of candidate summary
Args:
eval_sentences: The sentences that have been picked by the summarizer
ref_sentences: The sentences from the reference set
Returns:
A float: F_lcs
"""
f1_scores = []
for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):
m = float(len(ref_sentence))
n = float(len(eval_sentence))
lcs = _len_lcs(eval_sentence, ref_sentence)
f1_scores.append(_f_lcs(lcs, m, n))
return np.mean(f1_scores, dtype=np.float32)
def _len_lcs(x, y):
"""Returns the length of the Longest Common Subsequence between two seqs.
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: sequence of words
y: sequence of words
  Returns:
integer: Length of LCS between x and y
"""
table = _lcs(x, y)
n, m = len(x), len(y)
return table[n, m]
def _lcs(x, y):
"""Computes the length of the LCS between two seqs.
  The implementation below uses a dynamic programming algorithm and runs
in O(nm) time where n = len(x) and m = len(y).
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: collection of words
y: collection of words
Returns:
    Dictionary mapping index pairs (i, j) to the LCS length of x[:i] and y[:j]
"""
n, m = len(x), len(y)
table = dict()
for i in range(n + 1):
for j in range(m + 1):
if i == 0 or j == 0:
table[i, j] = 0
elif x[i - 1] == y[j - 1]:
table[i, j] = table[i - 1, j - 1] + 1
else:
table[i, j] = max(table[i - 1, j], table[i, j - 1])
return table
def _f_lcs(llcs, m, n):
"""Computes the LCS-based F-measure score.
Source: http://research.microsoft.com/en-us/um/people/cyl/download/papers/
rouge-working-note-v1.3.1.pdf
Args:
llcs: Length of LCS
m: number of words in reference summary
n: number of words in candidate summary
Returns:
Float. LCS-based F-measure score
"""
r_lcs = llcs / m
p_lcs = llcs / n
beta = p_lcs / (r_lcs + 1e-12)
num = (1 + (beta ** 2)) * r_lcs * p_lcs
denom = r_lcs + ((beta ** 2) * p_lcs)
f_lcs = num / (denom + 1e-12)
return f_lcs
|
def rouge_l_fscore(predictions, labels):
"""ROUGE scores computation between labels and predictions.
This is an approximate ROUGE scoring method since we do not glue word pieces
or decode the ids and tokenize the output.
Args:
predictions: tensor, model predictions
labels: tensor, gold output.
Returns:
rouge_l_fscore: approx rouge-l f1 score.
"""
outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
rouge_l_f_score = tf.py_func(rouge_l_sentence_level, (outputs, labels),
tf.float32)
return rouge_l_f_score, tf.constant(1.0)
| 373 | 389 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for calculating loss, accuracy, and other model metrics.
Metrics:
- Padded loss, accuracy, and negative log perplexity. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/metrics.py
- BLEU approximation. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/bleu_hook.py
- ROUGE score. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/rouge.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow.compat.v1 as tf
def _pad_tensors_to_same_length(x, y):
"""Pad x and y so that the results have the same length (second dimension)."""
with tf.name_scope("pad_to_same_length"):
x_length = tf.shape(x)[1]
y_length = tf.shape(y)[1]
max_length = tf.maximum(x_length, y_length)
x = tf.pad(x, [[0, 0], [0, max_length - x_length], [0, 0]])
y = tf.pad(y, [[0, 0], [0, max_length - y_length]])
return x, y
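# Illustrative sketch only; this example helper is not part of the original
# module. It shows the shapes produced by _pad_tensors_to_same_length.
def _example_pad_to_same_length():
  """Hypothetical usage example for _pad_tensors_to_same_length.

  Logits of shape [2, 3, 5] and labels of shape [2, 6] come back padded to
  the shared length 6 on the second dimension: [2, 6, 5] and [2, 6].
  """
  logits = tf.zeros([2, 3, 5])
  labels = tf.zeros([2, 6], dtype=tf.int32)
  return _pad_tensors_to_same_length(logits, labels)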
def padded_cross_entropy_loss(logits, labels, smoothing, vocab_size):
"""Calculate cross entropy loss while ignoring padding.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
labels: Tensor of size [batch_size, length_labels]
smoothing: Label smoothing constant, used to determine the on and off values
vocab_size: int size of the vocabulary
Returns:
    The cross entropy loss and weight tensors: float32 tensors with
shape [batch_size, max(length_logits, length_labels)]
"""
with tf.name_scope("loss", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
# Calculate smoothing cross entropy
with tf.name_scope("smoothing_cross_entropy", values=[logits, labels]):
confidence = 1.0 - smoothing
low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)
soft_targets = tf.one_hot(
tf.cast(labels, tf.int32),
depth=vocab_size,
on_value=confidence,
off_value=low_confidence)
xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(
logits=logits, labels=soft_targets)
# Calculate the best (lowest) possible value of cross entropy, and
# subtract from the cross entropy loss.
normalizing_constant = -(
confidence * tf.log(confidence) + tf.to_float(vocab_size - 1) *
low_confidence * tf.log(low_confidence + 1e-20))
xentropy -= normalizing_constant
weights = tf.to_float(tf.not_equal(labels, 0))
return xentropy * weights, weights
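# Illustrative sketch only; this example helper is not part of the original
# module. It shows one common way to reduce the per-position loss and the
# padding weights returned above into a scalar training loss.
def _example_scalar_loss(logits, labels):
  """Hypothetical usage example for padded_cross_entropy_loss.

  The smoothing value and vocab_size below are placeholders; the weighted
  mean ignores padding positions because their weights are 0.
  """
  xentropy, weights = padded_cross_entropy_loss(
      logits, labels, smoothing=0.1, vocab_size=32000)
  return tf.reduce_sum(xentropy) / tf.reduce_sum(weights)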
def _convert_to_eval_metric(metric_fn):
"""Wrap a metric fn that returns scores and weights as an eval metric fn.
The input metric_fn returns values for the current batch. The wrapper
aggregates the return values collected over all of the batches evaluated.
Args:
metric_fn: function that returns scores and weights for the current batch's
logits and predicted labels.
Returns:
function that aggregates the scores and weights from metric_fn.
"""
def problem_metric_fn(*args):
"""Returns an aggregation of the metric_fn's returned values."""
(scores, weights) = metric_fn(*args)
# The tf.metrics.mean function assures correct aggregation.
return tf.metrics.mean(scores, weights)
return problem_metric_fn
def get_eval_metrics(logits, labels, params):
"""Return dictionary of model evaluation metrics."""
metrics = {
"accuracy": _convert_to_eval_metric(padded_accuracy)(logits, labels),
"accuracy_top5": _convert_to_eval_metric(padded_accuracy_top5)(
logits, labels),
"accuracy_per_sequence": _convert_to_eval_metric(
padded_sequence_accuracy)(logits, labels),
"neg_log_perplexity": _convert_to_eval_metric(padded_neg_log_perplexity)(
logits, labels, params["vocab_size"]),
}
if not params["use_tpu"]:
# TPU does not support tf.py_func
metrics.update({
"approx_bleu_score": _convert_to_eval_metric(
bleu_score)(logits, labels),
"rouge_2_fscore": _convert_to_eval_metric(
rouge_2_fscore)(logits, labels),
"rouge_L_fscore": _convert_to_eval_metric(
rouge_l_fscore)(logits, labels),
})
# Prefix each of the metric names with "metrics/". This allows the metric
# graphs to display under the "metrics" category in TensorBoard.
metrics = {"metrics/%s" % k: v for k, v in six.iteritems(metrics)}
return metrics
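# Illustrative sketch only; this example helper is not part of the original
# module. It shows the params dict that get_eval_metrics expects; only
# "vocab_size" and "use_tpu" are read here, and the returned dict can be
# passed as eval_metric_ops to a tf.estimator.EstimatorSpec.
def _example_eval_metric_ops(logits, labels):
  """Hypothetical usage example for get_eval_metrics."""
  params = {"vocab_size": 32000, "use_tpu": False}
  return get_eval_metrics(logits, labels, params)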
def padded_accuracy(logits, labels):
"""Percentage of times that predictions matches labels on non-0s."""
with tf.variable_scope("padded_accuracy", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
outputs = tf.to_int32(tf.argmax(logits, axis=-1))
padded_labels = tf.to_int32(labels)
return tf.to_float(tf.equal(outputs, padded_labels)), weights
def padded_accuracy_topk(logits, labels, k):
"""Percentage of times that top-k predictions matches labels on non-0s."""
with tf.variable_scope("padded_accuracy_topk", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
effective_k = tf.minimum(k, tf.shape(logits)[-1])
_, outputs = tf.nn.top_k(logits, k=effective_k)
outputs = tf.to_int32(outputs)
padded_labels = tf.to_int32(labels)
padded_labels = tf.expand_dims(padded_labels, axis=-1)
padded_labels += tf.zeros_like(outputs) # Pad to same shape.
same = tf.to_float(tf.equal(outputs, padded_labels))
same_topk = tf.reduce_sum(same, axis=-1)
return same_topk, weights
def padded_accuracy_top5(logits, labels):
return padded_accuracy_topk(logits, labels, 5)
def padded_sequence_accuracy(logits, labels):
"""Percentage of times that predictions matches labels everywhere (non-0)."""
with tf.variable_scope("padded_sequence_accuracy", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
outputs = tf.to_int32(tf.argmax(logits, axis=-1))
padded_labels = tf.to_int32(labels)
not_correct = tf.to_float(tf.not_equal(outputs, padded_labels)) * weights
axis = list(range(1, len(outputs.get_shape())))
correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis))
return correct_seq, tf.constant(1.0)
def padded_neg_log_perplexity(logits, labels, vocab_size):
"""Average log-perplexity excluding padding 0s. No smoothing."""
num, den = padded_cross_entropy_loss(logits, labels, 0, vocab_size)
return -num, den
def bleu_score(logits, labels):
"""Approximate BLEU score computation between labels and predictions.
An approximate BLEU scoring method since we do not glue word pieces or
decode the ids and tokenize the output. By default, we use ngram order of 4
and use brevity penalty. Also, this does not have beam search.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
    labels: Tensor of size [batch_size, length_labels]
Returns:
    bleu: float32 tensor, approximate BLEU score
"""
predictions = tf.to_int32(tf.argmax(logits, axis=-1))
# TODO: Look into removing use of py_func
bleu = tf.py_func(compute_bleu, (labels, predictions), tf.float32)
return bleu, tf.constant(1.0)
def _get_ngrams_with_counter(segment, max_order):
"""Extracts all n-grams up to a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
max_order: maximum length in tokens of the n-grams returned by this
      method.
Returns:
    The Counter containing all n-grams up to max_order in segment
with a count of how many times each n-gram occurred.
"""
ngram_counts = collections.Counter()
for order in xrange(1, max_order + 1):
for i in xrange(0, len(segment) - order + 1):
ngram = tuple(segment[i:i + order])
ngram_counts[ngram] += 1
return ngram_counts
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
use_bp=True):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
use_bp: boolean, whether to apply brevity penalty.
Returns:
BLEU score.
"""
reference_length = 0
translation_length = 0
bp = 1.0
geo_mean = 0
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
precisions = []
for (references, translations) in zip(reference_corpus, translation_corpus):
reference_length += len(references)
translation_length += len(translations)
ref_ngram_counts = _get_ngrams_with_counter(references, max_order)
translation_ngram_counts = _get_ngrams_with_counter(translations, max_order)
overlap = dict((ngram,
min(count, translation_ngram_counts[ngram]))
for ngram, count in ref_ngram_counts.items())
for ngram in overlap:
matches_by_order[len(ngram) - 1] += overlap[ngram]
for ngram in translation_ngram_counts:
possible_matches_by_order[len(ngram) - 1] += translation_ngram_counts[
ngram]
precisions = [0] * max_order
smooth = 1.0
for i in xrange(0, max_order):
if possible_matches_by_order[i] > 0:
precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[i]
if matches_by_order[i] > 0:
precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[
i]
else:
smooth *= 2
precisions[i] = 1.0 / (smooth * possible_matches_by_order[i])
else:
precisions[i] = 0.0
if max(precisions) > 0:
p_log_sum = sum(math.log(p) for p in precisions if p)
geo_mean = math.exp(p_log_sum / max_order)
if use_bp:
ratio = translation_length / reference_length
bp = math.exp(1 - 1. / ratio) if ratio < 1.0 else 1.0
bleu = geo_mean * bp
return np.float32(bleu)
def rouge_2_fscore(logits, labels):
"""ROUGE-2 F1 score computation between labels and predictions.
This is an approximate ROUGE scoring method since we do not glue word pieces
or decode the ids and tokenize the output.
Args:
logits: tensor, model predictions
labels: tensor, gold output.
Returns:
rouge2_fscore: approx rouge-2 f1 score.
"""
predictions = tf.to_int32(tf.argmax(logits, axis=-1))
# TODO: Look into removing use of py_func
rouge_2_f_score = tf.py_func(rouge_n, (predictions, labels), tf.float32)
return rouge_2_f_score, tf.constant(1.0)
def _get_ngrams(n, text):
"""Calculates n-grams.
Args:
n: which n-grams to calculate
text: An array of tokens
Returns:
A set of n-grams
"""
ngram_set = set()
text_length = len(text)
max_index_ngram_start = text_length - n
for i in range(max_index_ngram_start + 1):
ngram_set.add(tuple(text[i:i + n]))
return ngram_set
def rouge_n(eval_sentences, ref_sentences, n=2):
"""Computes ROUGE-N f1 score of two text collections of sentences.
Source: https://www.microsoft.com/en-us/research/publication/
rouge-a-package-for-automatic-evaluation-of-summaries/
Args:
eval_sentences: Predicted sentences.
ref_sentences: Sentences from the reference set
n: Size of ngram. Defaults to 2.
Returns:
f1 score for ROUGE-N
"""
f1_scores = []
for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):
eval_ngrams = _get_ngrams(n, eval_sentence)
ref_ngrams = _get_ngrams(n, ref_sentence)
ref_count = len(ref_ngrams)
eval_count = len(eval_ngrams)
# Count the overlapping ngrams between evaluated and reference
overlapping_ngrams = eval_ngrams.intersection(ref_ngrams)
overlapping_count = len(overlapping_ngrams)
# Handle edge case. This isn't mathematically correct, but it's good enough
if eval_count == 0:
precision = 0.0
else:
precision = float(overlapping_count) / eval_count
if ref_count == 0:
recall = 0.0
else:
recall = float(overlapping_count) / ref_count
f1_scores.append(2.0 * ((precision * recall) / (precision + recall + 1e-8)))
# return overlapping_count / reference_count
return np.mean(f1_scores, dtype=np.float32)
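# Illustrative sketch only; this example helper is not part of the original
# module. It shows rouge_n on token-id sequences, mirroring how
# rouge_2_fscore feeds it through tf.py_func above.
def _example_rouge_n():
  """Hypothetical usage example for rouge_n.

  Identical prediction and reference share every bigram, so the F1 score is
  close to 1.0 (up to the 1e-8 smoothing term in the denominator).
  """
  eval_sentences = [[4, 8, 15, 16, 23]]
  ref_sentences = [[4, 8, 15, 16, 23]]
  return rouge_n(eval_sentences, ref_sentences, n=2)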
def rouge_l_fscore(predictions, labels):
"""ROUGE scores computation between labels and predictions.
This is an approximate ROUGE scoring method since we do not glue word pieces
or decode the ids and tokenize the output.
Args:
predictions: tensor, model predictions
labels: tensor, gold output.
Returns:
rouge_l_fscore: approx rouge-l f1 score.
"""
outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
rouge_l_f_score = tf.py_func(rouge_l_sentence_level, (outputs, labels),
tf.float32)
return rouge_l_f_score, tf.constant(1.0)
def rouge_l_sentence_level(eval_sentences, ref_sentences):
"""Computes ROUGE-L (sentence level) of two collections of sentences.
Source: https://www.microsoft.com/en-us/research/publication/
rouge-a-package-for-automatic-evaluation-of-summaries/
Calculated according to:
R_lcs = LCS(X,Y)/m
P_lcs = LCS(X,Y)/n
F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)
where:
X = reference summary
Y = Candidate summary
m = length of reference summary
n = length of candidate summary
Args:
eval_sentences: The sentences that have been picked by the summarizer
ref_sentences: The sentences from the reference set
Returns:
A float: F_lcs
"""
f1_scores = []
for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):
m = float(len(ref_sentence))
n = float(len(eval_sentence))
lcs = _len_lcs(eval_sentence, ref_sentence)
f1_scores.append(_f_lcs(lcs, m, n))
return np.mean(f1_scores, dtype=np.float32)
def _len_lcs(x, y):
"""Returns the length of the Longest Common Subsequence between two seqs.
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: sequence of words
y: sequence of words
  Returns:
integer: Length of LCS between x and y
"""
table = _lcs(x, y)
n, m = len(x), len(y)
return table[n, m]
def _lcs(x, y):
"""Computes the length of the LCS between two seqs.
  The implementation below uses a dynamic programming algorithm and runs
in O(nm) time where n = len(x) and m = len(y).
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: collection of words
y: collection of words
Returns:
    Dictionary mapping index pairs (i, j) to the LCS length of x[:i] and y[:j]
"""
n, m = len(x), len(y)
table = dict()
for i in range(n + 1):
for j in range(m + 1):
if i == 0 or j == 0:
table[i, j] = 0
elif x[i - 1] == y[j - 1]:
table[i, j] = table[i - 1, j - 1] + 1
else:
table[i, j] = max(table[i - 1, j], table[i, j - 1])
return table
def _f_lcs(llcs, m, n):
"""Computes the LCS-based F-measure score.
Source: http://research.microsoft.com/en-us/um/people/cyl/download/papers/
rouge-working-note-v1.3.1.pdf
Args:
llcs: Length of LCS
m: number of words in reference summary
n: number of words in candidate summary
Returns:
Float. LCS-based F-measure score
"""
r_lcs = llcs / m
p_lcs = llcs / n
beta = p_lcs / (r_lcs + 1e-12)
num = (1 + (beta ** 2)) * r_lcs * p_lcs
denom = r_lcs + ((beta ** 2) * p_lcs)
f_lcs = num / (denom + 1e-12)
return f_lcs
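# Illustrative sketch only; this example helper is not part of the original
# module. It walks the ROUGE-L pieces above on a tiny pair of sequences.
def _example_rouge_l_pieces():
  """Hypothetical usage example for _len_lcs and _f_lcs.

  With reference [1, 2, 3, 4] and candidate [1, 3, 4] the LCS is [1, 3, 4]
  (length 3), giving R_lcs = 3/4 and P_lcs = 3/3, which the F-measure then
  combines exactly as rouge_l_sentence_level does.
  """
  reference = [1, 2, 3, 4]
  candidate = [1, 3, 4]
  llcs = _len_lcs(candidate, reference)  # 3
  return _f_lcs(llcs, m=float(len(reference)), n=float(len(candidate)))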
|
_len_lcs
|
Returns the length of the Longest Common Subsequence between two seqs.
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: sequence of words
y: sequence of words
Returns:
integer: Length of LCS between x and y
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for calculating loss, accuracy, and other model metrics.
Metrics:
- Padded loss, accuracy, and negative log perplexity. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/metrics.py
- BLEU approximation. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/bleu_hook.py
- ROUGE score. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/rouge.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow.compat.v1 as tf
def _pad_tensors_to_same_length(x, y):
"""Pad x and y so that the results have the same length (second dimension)."""
with tf.name_scope("pad_to_same_length"):
x_length = tf.shape(x)[1]
y_length = tf.shape(y)[1]
max_length = tf.maximum(x_length, y_length)
x = tf.pad(x, [[0, 0], [0, max_length - x_length], [0, 0]])
y = tf.pad(y, [[0, 0], [0, max_length - y_length]])
return x, y
def padded_cross_entropy_loss(logits, labels, smoothing, vocab_size):
"""Calculate cross entropy loss while ignoring padding.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
labels: Tensor of size [batch_size, length_labels]
smoothing: Label smoothing constant, used to determine the on and off values
vocab_size: int size of the vocabulary
Returns:
    The cross entropy loss and weight tensors: float32 tensors with
shape [batch_size, max(length_logits, length_labels)]
"""
with tf.name_scope("loss", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
# Calculate smoothing cross entropy
with tf.name_scope("smoothing_cross_entropy", values=[logits, labels]):
confidence = 1.0 - smoothing
low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)
soft_targets = tf.one_hot(
tf.cast(labels, tf.int32),
depth=vocab_size,
on_value=confidence,
off_value=low_confidence)
xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(
logits=logits, labels=soft_targets)
# Calculate the best (lowest) possible value of cross entropy, and
# subtract from the cross entropy loss.
normalizing_constant = -(
confidence * tf.log(confidence) + tf.to_float(vocab_size - 1) *
low_confidence * tf.log(low_confidence + 1e-20))
xentropy -= normalizing_constant
weights = tf.to_float(tf.not_equal(labels, 0))
return xentropy * weights, weights
def _convert_to_eval_metric(metric_fn):
"""Wrap a metric fn that returns scores and weights as an eval metric fn.
The input metric_fn returns values for the current batch. The wrapper
aggregates the return values collected over all of the batches evaluated.
Args:
metric_fn: function that returns scores and weights for the current batch's
logits and predicted labels.
Returns:
function that aggregates the scores and weights from metric_fn.
"""
def problem_metric_fn(*args):
"""Returns an aggregation of the metric_fn's returned values."""
(scores, weights) = metric_fn(*args)
# The tf.metrics.mean function assures correct aggregation.
return tf.metrics.mean(scores, weights)
return problem_metric_fn
def get_eval_metrics(logits, labels, params):
"""Return dictionary of model evaluation metrics."""
metrics = {
"accuracy": _convert_to_eval_metric(padded_accuracy)(logits, labels),
"accuracy_top5": _convert_to_eval_metric(padded_accuracy_top5)(
logits, labels),
"accuracy_per_sequence": _convert_to_eval_metric(
padded_sequence_accuracy)(logits, labels),
"neg_log_perplexity": _convert_to_eval_metric(padded_neg_log_perplexity)(
logits, labels, params["vocab_size"]),
}
if not params["use_tpu"]:
# TPU does not support tf.py_func
metrics.update({
"approx_bleu_score": _convert_to_eval_metric(
bleu_score)(logits, labels),
"rouge_2_fscore": _convert_to_eval_metric(
rouge_2_fscore)(logits, labels),
"rouge_L_fscore": _convert_to_eval_metric(
rouge_l_fscore)(logits, labels),
})
# Prefix each of the metric names with "metrics/". This allows the metric
# graphs to display under the "metrics" category in TensorBoard.
metrics = {"metrics/%s" % k: v for k, v in six.iteritems(metrics)}
return metrics
def padded_accuracy(logits, labels):
"""Percentage of times that predictions matches labels on non-0s."""
with tf.variable_scope("padded_accuracy", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
outputs = tf.to_int32(tf.argmax(logits, axis=-1))
padded_labels = tf.to_int32(labels)
return tf.to_float(tf.equal(outputs, padded_labels)), weights
def padded_accuracy_topk(logits, labels, k):
"""Percentage of times that top-k predictions matches labels on non-0s."""
with tf.variable_scope("padded_accuracy_topk", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
effective_k = tf.minimum(k, tf.shape(logits)[-1])
_, outputs = tf.nn.top_k(logits, k=effective_k)
outputs = tf.to_int32(outputs)
padded_labels = tf.to_int32(labels)
padded_labels = tf.expand_dims(padded_labels, axis=-1)
padded_labels += tf.zeros_like(outputs) # Pad to same shape.
same = tf.to_float(tf.equal(outputs, padded_labels))
same_topk = tf.reduce_sum(same, axis=-1)
return same_topk, weights
def padded_accuracy_top5(logits, labels):
return padded_accuracy_topk(logits, labels, 5)
def padded_sequence_accuracy(logits, labels):
"""Percentage of times that predictions matches labels everywhere (non-0)."""
with tf.variable_scope("padded_sequence_accuracy", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
outputs = tf.to_int32(tf.argmax(logits, axis=-1))
padded_labels = tf.to_int32(labels)
not_correct = tf.to_float(tf.not_equal(outputs, padded_labels)) * weights
axis = list(range(1, len(outputs.get_shape())))
correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis))
return correct_seq, tf.constant(1.0)
def padded_neg_log_perplexity(logits, labels, vocab_size):
"""Average log-perplexity excluding padding 0s. No smoothing."""
num, den = padded_cross_entropy_loss(logits, labels, 0, vocab_size)
return -num, den
def bleu_score(logits, labels):
"""Approximate BLEU score computation between labels and predictions.
An approximate BLEU scoring method since we do not glue word pieces or
decode the ids and tokenize the output. By default, we use ngram order of 4
and use brevity penalty. Also, this does not have beam search.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
    labels: Tensor of size [batch_size, length_labels]
Returns:
    bleu: float32 tensor, approximate BLEU score
"""
predictions = tf.to_int32(tf.argmax(logits, axis=-1))
# TODO: Look into removing use of py_func
bleu = tf.py_func(compute_bleu, (labels, predictions), tf.float32)
return bleu, tf.constant(1.0)
def _get_ngrams_with_counter(segment, max_order):
"""Extracts all n-grams up to a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
max_order: maximum length in tokens of the n-grams returned by this
      method.
Returns:
    The Counter containing all n-grams up to max_order in segment
with a count of how many times each n-gram occurred.
"""
ngram_counts = collections.Counter()
for order in xrange(1, max_order + 1):
for i in xrange(0, len(segment) - order + 1):
ngram = tuple(segment[i:i + order])
ngram_counts[ngram] += 1
return ngram_counts
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
use_bp=True):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
use_bp: boolean, whether to apply brevity penalty.
Returns:
BLEU score.
"""
reference_length = 0
translation_length = 0
bp = 1.0
geo_mean = 0
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
precisions = []
for (references, translations) in zip(reference_corpus, translation_corpus):
reference_length += len(references)
translation_length += len(translations)
ref_ngram_counts = _get_ngrams_with_counter(references, max_order)
translation_ngram_counts = _get_ngrams_with_counter(translations, max_order)
overlap = dict((ngram,
min(count, translation_ngram_counts[ngram]))
for ngram, count in ref_ngram_counts.items())
for ngram in overlap:
matches_by_order[len(ngram) - 1] += overlap[ngram]
for ngram in translation_ngram_counts:
possible_matches_by_order[len(ngram) - 1] += translation_ngram_counts[
ngram]
precisions = [0] * max_order
smooth = 1.0
for i in xrange(0, max_order):
if possible_matches_by_order[i] > 0:
precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[i]
if matches_by_order[i] > 0:
precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[
i]
else:
smooth *= 2
precisions[i] = 1.0 / (smooth * possible_matches_by_order[i])
else:
precisions[i] = 0.0
if max(precisions) > 0:
p_log_sum = sum(math.log(p) for p in precisions if p)
geo_mean = math.exp(p_log_sum / max_order)
if use_bp:
ratio = translation_length / reference_length
bp = math.exp(1 - 1. / ratio) if ratio < 1.0 else 1.0
bleu = geo_mean * bp
return np.float32(bleu)
def rouge_2_fscore(logits, labels):
"""ROUGE-2 F1 score computation between labels and predictions.
This is an approximate ROUGE scoring method since we do not glue word pieces
or decode the ids and tokenize the output.
Args:
logits: tensor, model predictions
labels: tensor, gold output.
Returns:
rouge2_fscore: approx rouge-2 f1 score.
"""
predictions = tf.to_int32(tf.argmax(logits, axis=-1))
# TODO: Look into removing use of py_func
rouge_2_f_score = tf.py_func(rouge_n, (predictions, labels), tf.float32)
return rouge_2_f_score, tf.constant(1.0)
def _get_ngrams(n, text):
"""Calculates n-grams.
Args:
n: which n-grams to calculate
text: An array of tokens
Returns:
A set of n-grams
"""
ngram_set = set()
text_length = len(text)
max_index_ngram_start = text_length - n
for i in range(max_index_ngram_start + 1):
ngram_set.add(tuple(text[i:i + n]))
return ngram_set
def rouge_n(eval_sentences, ref_sentences, n=2):
"""Computes ROUGE-N f1 score of two text collections of sentences.
Source: https://www.microsoft.com/en-us/research/publication/
rouge-a-package-for-automatic-evaluation-of-summaries/
Args:
eval_sentences: Predicted sentences.
ref_sentences: Sentences from the reference set
n: Size of ngram. Defaults to 2.
Returns:
f1 score for ROUGE-N
"""
f1_scores = []
for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):
eval_ngrams = _get_ngrams(n, eval_sentence)
ref_ngrams = _get_ngrams(n, ref_sentence)
ref_count = len(ref_ngrams)
eval_count = len(eval_ngrams)
# Count the overlapping ngrams between evaluated and reference
overlapping_ngrams = eval_ngrams.intersection(ref_ngrams)
overlapping_count = len(overlapping_ngrams)
# Handle edge case. This isn't mathematically correct, but it's good enough
if eval_count == 0:
precision = 0.0
else:
precision = float(overlapping_count) / eval_count
if ref_count == 0:
recall = 0.0
else:
recall = float(overlapping_count) / ref_count
f1_scores.append(2.0 * ((precision * recall) / (precision + recall + 1e-8)))
# return overlapping_count / reference_count
return np.mean(f1_scores, dtype=np.float32)
def rouge_l_fscore(predictions, labels):
"""ROUGE scores computation between labels and predictions.
This is an approximate ROUGE scoring method since we do not glue word pieces
or decode the ids and tokenize the output.
Args:
predictions: tensor, model predictions
labels: tensor, gold output.
Returns:
rouge_l_fscore: approx rouge-l f1 score.
"""
outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
rouge_l_f_score = tf.py_func(rouge_l_sentence_level, (outputs, labels),
tf.float32)
return rouge_l_f_score, tf.constant(1.0)
def rouge_l_sentence_level(eval_sentences, ref_sentences):
"""Computes ROUGE-L (sentence level) of two collections of sentences.
Source: https://www.microsoft.com/en-us/research/publication/
rouge-a-package-for-automatic-evaluation-of-summaries/
Calculated according to:
R_lcs = LCS(X,Y)/m
P_lcs = LCS(X,Y)/n
F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)
where:
X = reference summary
Y = Candidate summary
m = length of reference summary
n = length of candidate summary
Args:
eval_sentences: The sentences that have been picked by the summarizer
ref_sentences: The sentences from the reference set
Returns:
A float: F_lcs
"""
f1_scores = []
for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):
m = float(len(ref_sentence))
n = float(len(eval_sentence))
lcs = _len_lcs(eval_sentence, ref_sentence)
f1_scores.append(_f_lcs(lcs, m, n))
return np.mean(f1_scores, dtype=np.float32)
# MASKED: _len_lcs function (lines 426-440)
def _lcs(x, y):
"""Computes the length of the LCS between two seqs.
  The implementation below uses a dynamic programming algorithm and runs
in O(nm) time where n = len(x) and m = len(y).
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: collection of words
y: collection of words
Returns:
    Dictionary mapping index pairs (i, j) to the LCS length of x[:i] and y[:j]
"""
n, m = len(x), len(y)
table = dict()
for i in range(n + 1):
for j in range(m + 1):
if i == 0 or j == 0:
table[i, j] = 0
elif x[i - 1] == y[j - 1]:
table[i, j] = table[i - 1, j - 1] + 1
else:
table[i, j] = max(table[i - 1, j], table[i, j - 1])
return table
def _f_lcs(llcs, m, n):
"""Computes the LCS-based F-measure score.
Source: http://research.microsoft.com/en-us/um/people/cyl/download/papers/
rouge-working-note-v1.3.1.pdf
Args:
llcs: Length of LCS
m: number of words in reference summary
n: number of words in candidate summary
Returns:
Float. LCS-based F-measure score
"""
r_lcs = llcs / m
p_lcs = llcs / n
beta = p_lcs / (r_lcs + 1e-12)
num = (1 + (beta ** 2)) * r_lcs * p_lcs
denom = r_lcs + ((beta ** 2) * p_lcs)
f_lcs = num / (denom + 1e-12)
return f_lcs
|
def _len_lcs(x, y):
"""Returns the length of the Longest Common Subsequence between two seqs.
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: sequence of words
y: sequence of words
  Returns:
integer: Length of LCS between x and y
"""
table = _lcs(x, y)
n, m = len(x), len(y)
return table[n, m]
| 426 | 440 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for calculating loss, accuracy, and other model metrics.
Metrics:
- Padded loss, accuracy, and negative log perplexity. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/metrics.py
- BLEU approximation. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/bleu_hook.py
- ROUGE score. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/rouge.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow.compat.v1 as tf
def _pad_tensors_to_same_length(x, y):
"""Pad x and y so that the results have the same length (second dimension)."""
with tf.name_scope("pad_to_same_length"):
x_length = tf.shape(x)[1]
y_length = tf.shape(y)[1]
max_length = tf.maximum(x_length, y_length)
x = tf.pad(x, [[0, 0], [0, max_length - x_length], [0, 0]])
y = tf.pad(y, [[0, 0], [0, max_length - y_length]])
return x, y
def padded_cross_entropy_loss(logits, labels, smoothing, vocab_size):
"""Calculate cross entropy loss while ignoring padding.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
labels: Tensor of size [batch_size, length_labels]
smoothing: Label smoothing constant, used to determine the on and off values
vocab_size: int size of the vocabulary
Returns:
    The cross entropy loss and weight tensors: float32 tensors with
shape [batch_size, max(length_logits, length_labels)]
"""
with tf.name_scope("loss", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
# Calculate smoothing cross entropy
with tf.name_scope("smoothing_cross_entropy", values=[logits, labels]):
confidence = 1.0 - smoothing
low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)
soft_targets = tf.one_hot(
tf.cast(labels, tf.int32),
depth=vocab_size,
on_value=confidence,
off_value=low_confidence)
xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(
logits=logits, labels=soft_targets)
# Calculate the best (lowest) possible value of cross entropy, and
# subtract from the cross entropy loss.
normalizing_constant = -(
confidence * tf.log(confidence) + tf.to_float(vocab_size - 1) *
low_confidence * tf.log(low_confidence + 1e-20))
xentropy -= normalizing_constant
weights = tf.to_float(tf.not_equal(labels, 0))
return xentropy * weights, weights
def _convert_to_eval_metric(metric_fn):
"""Wrap a metric fn that returns scores and weights as an eval metric fn.
The input metric_fn returns values for the current batch. The wrapper
aggregates the return values collected over all of the batches evaluated.
Args:
metric_fn: function that returns scores and weights for the current batch's
logits and predicted labels.
Returns:
function that aggregates the scores and weights from metric_fn.
"""
def problem_metric_fn(*args):
"""Returns an aggregation of the metric_fn's returned values."""
(scores, weights) = metric_fn(*args)
# The tf.metrics.mean function assures correct aggregation.
return tf.metrics.mean(scores, weights)
return problem_metric_fn
def get_eval_metrics(logits, labels, params):
"""Return dictionary of model evaluation metrics."""
metrics = {
"accuracy": _convert_to_eval_metric(padded_accuracy)(logits, labels),
"accuracy_top5": _convert_to_eval_metric(padded_accuracy_top5)(
logits, labels),
"accuracy_per_sequence": _convert_to_eval_metric(
padded_sequence_accuracy)(logits, labels),
"neg_log_perplexity": _convert_to_eval_metric(padded_neg_log_perplexity)(
logits, labels, params["vocab_size"]),
}
if not params["use_tpu"]:
# TPU does not support tf.py_func
metrics.update({
"approx_bleu_score": _convert_to_eval_metric(
bleu_score)(logits, labels),
"rouge_2_fscore": _convert_to_eval_metric(
rouge_2_fscore)(logits, labels),
"rouge_L_fscore": _convert_to_eval_metric(
rouge_l_fscore)(logits, labels),
})
# Prefix each of the metric names with "metrics/". This allows the metric
# graphs to display under the "metrics" category in TensorBoard.
metrics = {"metrics/%s" % k: v for k, v in six.iteritems(metrics)}
return metrics
def padded_accuracy(logits, labels):
"""Percentage of times that predictions matches labels on non-0s."""
with tf.variable_scope("padded_accuracy", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
outputs = tf.to_int32(tf.argmax(logits, axis=-1))
padded_labels = tf.to_int32(labels)
return tf.to_float(tf.equal(outputs, padded_labels)), weights
def padded_accuracy_topk(logits, labels, k):
"""Percentage of times that top-k predictions matches labels on non-0s."""
with tf.variable_scope("padded_accuracy_topk", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
effective_k = tf.minimum(k, tf.shape(logits)[-1])
_, outputs = tf.nn.top_k(logits, k=effective_k)
outputs = tf.to_int32(outputs)
padded_labels = tf.to_int32(labels)
padded_labels = tf.expand_dims(padded_labels, axis=-1)
padded_labels += tf.zeros_like(outputs) # Pad to same shape.
same = tf.to_float(tf.equal(outputs, padded_labels))
same_topk = tf.reduce_sum(same, axis=-1)
return same_topk, weights
def padded_accuracy_top5(logits, labels):
return padded_accuracy_topk(logits, labels, 5)
def padded_sequence_accuracy(logits, labels):
"""Percentage of times that predictions matches labels everywhere (non-0)."""
with tf.variable_scope("padded_sequence_accuracy", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
outputs = tf.to_int32(tf.argmax(logits, axis=-1))
padded_labels = tf.to_int32(labels)
not_correct = tf.to_float(tf.not_equal(outputs, padded_labels)) * weights
axis = list(range(1, len(outputs.get_shape())))
correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis))
return correct_seq, tf.constant(1.0)
def padded_neg_log_perplexity(logits, labels, vocab_size):
"""Average log-perplexity excluding padding 0s. No smoothing."""
num, den = padded_cross_entropy_loss(logits, labels, 0, vocab_size)
return -num, den
def bleu_score(logits, labels):
"""Approximate BLEU score computation between labels and predictions.
An approximate BLEU scoring method since we do not glue word pieces or
decode the ids and tokenize the output. By default, we use ngram order of 4
and use brevity penalty. Also, this does not have beam search.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
    labels: Tensor of size [batch_size, length_labels]
Returns:
    bleu: float32 tensor, approximate BLEU score
"""
predictions = tf.to_int32(tf.argmax(logits, axis=-1))
# TODO: Look into removing use of py_func
bleu = tf.py_func(compute_bleu, (labels, predictions), tf.float32)
return bleu, tf.constant(1.0)
def _get_ngrams_with_counter(segment, max_order):
"""Extracts all n-grams up to a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
max_order: maximum length in tokens of the n-grams returned by this
      method.
Returns:
    The Counter containing all n-grams up to max_order in segment
with a count of how many times each n-gram occurred.
"""
ngram_counts = collections.Counter()
for order in xrange(1, max_order + 1):
for i in xrange(0, len(segment) - order + 1):
ngram = tuple(segment[i:i + order])
ngram_counts[ngram] += 1
return ngram_counts
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
use_bp=True):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
use_bp: boolean, whether to apply brevity penalty.
Returns:
BLEU score.
"""
reference_length = 0
translation_length = 0
bp = 1.0
geo_mean = 0
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
precisions = []
for (references, translations) in zip(reference_corpus, translation_corpus):
reference_length += len(references)
translation_length += len(translations)
ref_ngram_counts = _get_ngrams_with_counter(references, max_order)
translation_ngram_counts = _get_ngrams_with_counter(translations, max_order)
overlap = dict((ngram,
min(count, translation_ngram_counts[ngram]))
for ngram, count in ref_ngram_counts.items())
for ngram in overlap:
matches_by_order[len(ngram) - 1] += overlap[ngram]
for ngram in translation_ngram_counts:
possible_matches_by_order[len(ngram) - 1] += translation_ngram_counts[
ngram]
precisions = [0] * max_order
smooth = 1.0
for i in xrange(0, max_order):
if possible_matches_by_order[i] > 0:
precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[i]
if matches_by_order[i] > 0:
precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[
i]
else:
smooth *= 2
precisions[i] = 1.0 / (smooth * possible_matches_by_order[i])
else:
precisions[i] = 0.0
if max(precisions) > 0:
p_log_sum = sum(math.log(p) for p in precisions if p)
geo_mean = math.exp(p_log_sum / max_order)
if use_bp:
ratio = translation_length / reference_length
bp = math.exp(1 - 1. / ratio) if ratio < 1.0 else 1.0
bleu = geo_mean * bp
return np.float32(bleu)
def rouge_2_fscore(logits, labels):
"""ROUGE-2 F1 score computation between labels and predictions.
This is an approximate ROUGE scoring method since we do not glue word pieces
or decode the ids and tokenize the output.
Args:
logits: tensor, model predictions
labels: tensor, gold output.
Returns:
rouge2_fscore: approx rouge-2 f1 score.
"""
predictions = tf.to_int32(tf.argmax(logits, axis=-1))
# TODO: Look into removing use of py_func
rouge_2_f_score = tf.py_func(rouge_n, (predictions, labels), tf.float32)
return rouge_2_f_score, tf.constant(1.0)
def _get_ngrams(n, text):
"""Calculates n-grams.
Args:
n: which n-grams to calculate
text: An array of tokens
Returns:
A set of n-grams
"""
ngram_set = set()
text_length = len(text)
max_index_ngram_start = text_length - n
for i in range(max_index_ngram_start + 1):
ngram_set.add(tuple(text[i:i + n]))
return ngram_set
def rouge_n(eval_sentences, ref_sentences, n=2):
"""Computes ROUGE-N f1 score of two text collections of sentences.
Source: https://www.microsoft.com/en-us/research/publication/
rouge-a-package-for-automatic-evaluation-of-summaries/
Args:
eval_sentences: Predicted sentences.
ref_sentences: Sentences from the reference set
n: Size of ngram. Defaults to 2.
Returns:
f1 score for ROUGE-N
"""
f1_scores = []
for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):
eval_ngrams = _get_ngrams(n, eval_sentence)
ref_ngrams = _get_ngrams(n, ref_sentence)
ref_count = len(ref_ngrams)
eval_count = len(eval_ngrams)
# Count the overlapping ngrams between evaluated and reference
overlapping_ngrams = eval_ngrams.intersection(ref_ngrams)
overlapping_count = len(overlapping_ngrams)
# Handle edge case. This isn't mathematically correct, but it's good enough
if eval_count == 0:
precision = 0.0
else:
precision = float(overlapping_count) / eval_count
if ref_count == 0:
recall = 0.0
else:
recall = float(overlapping_count) / ref_count
f1_scores.append(2.0 * ((precision * recall) / (precision + recall + 1e-8)))
# return overlapping_count / reference_count
return np.mean(f1_scores, dtype=np.float32)
def rouge_l_fscore(predictions, labels):
"""ROUGE scores computation between labels and predictions.
This is an approximate ROUGE scoring method since we do not glue word pieces
or decode the ids and tokenize the output.
Args:
predictions: tensor, model predictions
labels: tensor, gold output.
Returns:
rouge_l_fscore: approx rouge-l f1 score.
"""
outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
rouge_l_f_score = tf.py_func(rouge_l_sentence_level, (outputs, labels),
tf.float32)
return rouge_l_f_score, tf.constant(1.0)
def rouge_l_sentence_level(eval_sentences, ref_sentences):
"""Computes ROUGE-L (sentence level) of two collections of sentences.
Source: https://www.microsoft.com/en-us/research/publication/
rouge-a-package-for-automatic-evaluation-of-summaries/
Calculated according to:
R_lcs = LCS(X,Y)/m
P_lcs = LCS(X,Y)/n
F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)
where:
X = reference summary
Y = Candidate summary
m = length of reference summary
n = length of candidate summary
Args:
eval_sentences: The sentences that have been picked by the summarizer
ref_sentences: The sentences from the reference set
Returns:
A float: F_lcs
"""
f1_scores = []
for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):
m = float(len(ref_sentence))
n = float(len(eval_sentence))
lcs = _len_lcs(eval_sentence, ref_sentence)
f1_scores.append(_f_lcs(lcs, m, n))
return np.mean(f1_scores, dtype=np.float32)
def _len_lcs(x, y):
"""Returns the length of the Longest Common Subsequence between two seqs.
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: sequence of words
y: sequence of words
  Returns:
integer: Length of LCS between x and y
"""
table = _lcs(x, y)
n, m = len(x), len(y)
return table[n, m]
def _lcs(x, y):
"""Computes the length of the LCS between two seqs.
  The implementation below uses a dynamic programming algorithm and runs
in O(nm) time where n = len(x) and m = len(y).
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: collection of words
y: collection of words
Returns:
    Dictionary mapping index pairs (i, j) to the LCS length of x[:i] and y[:j]
"""
n, m = len(x), len(y)
table = dict()
for i in range(n + 1):
for j in range(m + 1):
if i == 0 or j == 0:
table[i, j] = 0
elif x[i - 1] == y[j - 1]:
table[i, j] = table[i - 1, j - 1] + 1
else:
table[i, j] = max(table[i - 1, j], table[i, j - 1])
return table
def _f_lcs(llcs, m, n):
"""Computes the LCS-based F-measure score.
Source: http://research.microsoft.com/en-us/um/people/cyl/download/papers/
rouge-working-note-v1.3.1.pdf
Args:
llcs: Length of LCS
m: number of words in reference summary
n: number of words in candidate summary
Returns:
Float. LCS-based F-measure score
"""
r_lcs = llcs / m
p_lcs = llcs / n
beta = p_lcs / (r_lcs + 1e-12)
num = (1 + (beta ** 2)) * r_lcs * p_lcs
denom = r_lcs + ((beta ** 2) * p_lcs)
f_lcs = num / (denom + 1e-12)
return f_lcs
|
_f_lcs
|
Computes the LCS-based F-measure score.
Source: http://research.microsoft.com/en-us/um/people/cyl/download/papers/
rouge-working-note-v1.3.1.pdf
Args:
llcs: Length of LCS
m: number of words in reference summary
n: number of words in candidate summary
Returns:
Float. LCS-based F-measure score
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for calculating loss, accuracy, and other model metrics.
Metrics:
- Padded loss, accuracy, and negative log perplexity. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/metrics.py
- BLEU approximation. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/bleu_hook.py
- ROUGE score. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/rouge.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow.compat.v1 as tf
def _pad_tensors_to_same_length(x, y):
"""Pad x and y so that the results have the same length (second dimension)."""
with tf.name_scope("pad_to_same_length"):
x_length = tf.shape(x)[1]
y_length = tf.shape(y)[1]
max_length = tf.maximum(x_length, y_length)
x = tf.pad(x, [[0, 0], [0, max_length - x_length], [0, 0]])
y = tf.pad(y, [[0, 0], [0, max_length - y_length]])
return x, y
def padded_cross_entropy_loss(logits, labels, smoothing, vocab_size):
"""Calculate cross entropy loss while ignoring padding.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
labels: Tensor of size [batch_size, length_labels]
smoothing: Label smoothing constant, used to determine the on and off values
vocab_size: int size of the vocabulary
Returns:
    The cross entropy loss and weight tensors: float32 tensors with
shape [batch_size, max(length_logits, length_labels)]
"""
with tf.name_scope("loss", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
# Calculate smoothing cross entropy
with tf.name_scope("smoothing_cross_entropy", values=[logits, labels]):
confidence = 1.0 - smoothing
low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)
soft_targets = tf.one_hot(
tf.cast(labels, tf.int32),
depth=vocab_size,
on_value=confidence,
off_value=low_confidence)
xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(
logits=logits, labels=soft_targets)
# Calculate the best (lowest) possible value of cross entropy, and
# subtract from the cross entropy loss.
normalizing_constant = -(
confidence * tf.log(confidence) + tf.to_float(vocab_size - 1) *
low_confidence * tf.log(low_confidence + 1e-20))
xentropy -= normalizing_constant
weights = tf.to_float(tf.not_equal(labels, 0))
return xentropy * weights, weights
def _convert_to_eval_metric(metric_fn):
"""Wrap a metric fn that returns scores and weights as an eval metric fn.
The input metric_fn returns values for the current batch. The wrapper
aggregates the return values collected over all of the batches evaluated.
Args:
metric_fn: function that returns scores and weights for the current batch's
logits and predicted labels.
Returns:
function that aggregates the scores and weights from metric_fn.
"""
def problem_metric_fn(*args):
"""Returns an aggregation of the metric_fn's returned values."""
(scores, weights) = metric_fn(*args)
# The tf.metrics.mean function assures correct aggregation.
return tf.metrics.mean(scores, weights)
return problem_metric_fn
def get_eval_metrics(logits, labels, params):
"""Return dictionary of model evaluation metrics."""
metrics = {
"accuracy": _convert_to_eval_metric(padded_accuracy)(logits, labels),
"accuracy_top5": _convert_to_eval_metric(padded_accuracy_top5)(
logits, labels),
"accuracy_per_sequence": _convert_to_eval_metric(
padded_sequence_accuracy)(logits, labels),
"neg_log_perplexity": _convert_to_eval_metric(padded_neg_log_perplexity)(
logits, labels, params["vocab_size"]),
}
if not params["use_tpu"]:
# TPU does not support tf.py_func
metrics.update({
"approx_bleu_score": _convert_to_eval_metric(
bleu_score)(logits, labels),
"rouge_2_fscore": _convert_to_eval_metric(
rouge_2_fscore)(logits, labels),
"rouge_L_fscore": _convert_to_eval_metric(
rouge_l_fscore)(logits, labels),
})
# Prefix each of the metric names with "metrics/". This allows the metric
# graphs to display under the "metrics" category in TensorBoard.
metrics = {"metrics/%s" % k: v for k, v in six.iteritems(metrics)}
return metrics
def padded_accuracy(logits, labels):
"""Percentage of times that predictions matches labels on non-0s."""
with tf.variable_scope("padded_accuracy", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
outputs = tf.to_int32(tf.argmax(logits, axis=-1))
padded_labels = tf.to_int32(labels)
return tf.to_float(tf.equal(outputs, padded_labels)), weights
def padded_accuracy_topk(logits, labels, k):
"""Percentage of times that top-k predictions matches labels on non-0s."""
with tf.variable_scope("padded_accuracy_topk", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
effective_k = tf.minimum(k, tf.shape(logits)[-1])
_, outputs = tf.nn.top_k(logits, k=effective_k)
outputs = tf.to_int32(outputs)
padded_labels = tf.to_int32(labels)
padded_labels = tf.expand_dims(padded_labels, axis=-1)
padded_labels += tf.zeros_like(outputs) # Pad to same shape.
same = tf.to_float(tf.equal(outputs, padded_labels))
same_topk = tf.reduce_sum(same, axis=-1)
return same_topk, weights
def padded_accuracy_top5(logits, labels):
return padded_accuracy_topk(logits, labels, 5)
def padded_sequence_accuracy(logits, labels):
"""Percentage of times that predictions matches labels everywhere (non-0)."""
with tf.variable_scope("padded_sequence_accuracy", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
outputs = tf.to_int32(tf.argmax(logits, axis=-1))
padded_labels = tf.to_int32(labels)
not_correct = tf.to_float(tf.not_equal(outputs, padded_labels)) * weights
axis = list(range(1, len(outputs.get_shape())))
correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis))
return correct_seq, tf.constant(1.0)
def padded_neg_log_perplexity(logits, labels, vocab_size):
"""Average log-perplexity excluding padding 0s. No smoothing."""
num, den = padded_cross_entropy_loss(logits, labels, 0, vocab_size)
return -num, den
def bleu_score(logits, labels):
"""Approximate BLEU score computation between labels and predictions.
An approximate BLEU scoring method since we do not glue word pieces or
decode the ids and tokenize the output. By default, we use ngram order of 4
and use brevity penalty. Also, this does not have beam search.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
labels: Tensor of size [batch-size, length_labels]
Returns:
bleu: int, approx bleu score
"""
predictions = tf.to_int32(tf.argmax(logits, axis=-1))
# TODO: Look into removing use of py_func
bleu = tf.py_func(compute_bleu, (labels, predictions), tf.float32)
return bleu, tf.constant(1.0)
def _get_ngrams_with_counter(segment, max_order):
"""Extracts all n-grams up to a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
    max_order: maximum length in tokens of the n-grams returned by this
      method.
  Returns:
    The Counter containing all n-grams up to max_order in segment
    with a count of how many times each n-gram occurred.
"""
ngram_counts = collections.Counter()
for order in xrange(1, max_order + 1):
for i in xrange(0, len(segment) - order + 1):
ngram = tuple(segment[i:i + order])
ngram_counts[ngram] += 1
return ngram_counts
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
use_bp=True):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
use_bp: boolean, whether to apply brevity penalty.
Returns:
BLEU score.
"""
reference_length = 0
translation_length = 0
bp = 1.0
geo_mean = 0
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
precisions = []
for (references, translations) in zip(reference_corpus, translation_corpus):
reference_length += len(references)
translation_length += len(translations)
ref_ngram_counts = _get_ngrams_with_counter(references, max_order)
translation_ngram_counts = _get_ngrams_with_counter(translations, max_order)
overlap = dict((ngram,
min(count, translation_ngram_counts[ngram]))
for ngram, count in ref_ngram_counts.items())
for ngram in overlap:
matches_by_order[len(ngram) - 1] += overlap[ngram]
for ngram in translation_ngram_counts:
possible_matches_by_order[len(ngram) - 1] += translation_ngram_counts[
ngram]
precisions = [0] * max_order
smooth = 1.0
for i in xrange(0, max_order):
if possible_matches_by_order[i] > 0:
precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[i]
if matches_by_order[i] > 0:
precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[
i]
else:
smooth *= 2
precisions[i] = 1.0 / (smooth * possible_matches_by_order[i])
else:
precisions[i] = 0.0
if max(precisions) > 0:
p_log_sum = sum(math.log(p) for p in precisions if p)
geo_mean = math.exp(p_log_sum / max_order)
if use_bp:
ratio = translation_length / reference_length
bp = math.exp(1 - 1. / ratio) if ratio < 1.0 else 1.0
bleu = geo_mean * bp
return np.float32(bleu)
def rouge_2_fscore(logits, labels):
"""ROUGE-2 F1 score computation between labels and predictions.
This is an approximate ROUGE scoring method since we do not glue word pieces
or decode the ids and tokenize the output.
Args:
logits: tensor, model predictions
labels: tensor, gold output.
Returns:
rouge2_fscore: approx rouge-2 f1 score.
"""
predictions = tf.to_int32(tf.argmax(logits, axis=-1))
# TODO: Look into removing use of py_func
rouge_2_f_score = tf.py_func(rouge_n, (predictions, labels), tf.float32)
return rouge_2_f_score, tf.constant(1.0)
def _get_ngrams(n, text):
"""Calculates n-grams.
Args:
n: which n-grams to calculate
text: An array of tokens
Returns:
A set of n-grams
"""
ngram_set = set()
text_length = len(text)
max_index_ngram_start = text_length - n
for i in range(max_index_ngram_start + 1):
ngram_set.add(tuple(text[i:i + n]))
return ngram_set
def rouge_n(eval_sentences, ref_sentences, n=2):
"""Computes ROUGE-N f1 score of two text collections of sentences.
Source: https://www.microsoft.com/en-us/research/publication/
rouge-a-package-for-automatic-evaluation-of-summaries/
Args:
eval_sentences: Predicted sentences.
ref_sentences: Sentences from the reference set
n: Size of ngram. Defaults to 2.
Returns:
f1 score for ROUGE-N
"""
f1_scores = []
for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):
eval_ngrams = _get_ngrams(n, eval_sentence)
ref_ngrams = _get_ngrams(n, ref_sentence)
ref_count = len(ref_ngrams)
eval_count = len(eval_ngrams)
# Count the overlapping ngrams between evaluated and reference
overlapping_ngrams = eval_ngrams.intersection(ref_ngrams)
overlapping_count = len(overlapping_ngrams)
# Handle edge case. This isn't mathematically correct, but it's good enough
if eval_count == 0:
precision = 0.0
else:
precision = float(overlapping_count) / eval_count
if ref_count == 0:
recall = 0.0
else:
recall = float(overlapping_count) / ref_count
f1_scores.append(2.0 * ((precision * recall) / (precision + recall + 1e-8)))
# return overlapping_count / reference_count
return np.mean(f1_scores, dtype=np.float32)
def rouge_l_fscore(predictions, labels):
"""ROUGE scores computation between labels and predictions.
This is an approximate ROUGE scoring method since we do not glue word pieces
or decode the ids and tokenize the output.
Args:
predictions: tensor, model predictions
labels: tensor, gold output.
Returns:
rouge_l_fscore: approx rouge-l f1 score.
"""
outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
rouge_l_f_score = tf.py_func(rouge_l_sentence_level, (outputs, labels),
tf.float32)
return rouge_l_f_score, tf.constant(1.0)
def rouge_l_sentence_level(eval_sentences, ref_sentences):
"""Computes ROUGE-L (sentence level) of two collections of sentences.
Source: https://www.microsoft.com/en-us/research/publication/
rouge-a-package-for-automatic-evaluation-of-summaries/
Calculated according to:
R_lcs = LCS(X,Y)/m
P_lcs = LCS(X,Y)/n
F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)
where:
X = reference summary
Y = Candidate summary
m = length of reference summary
n = length of candidate summary
Args:
eval_sentences: The sentences that have been picked by the summarizer
ref_sentences: The sentences from the reference set
Returns:
A float: F_lcs
"""
f1_scores = []
for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):
m = float(len(ref_sentence))
n = float(len(eval_sentence))
lcs = _len_lcs(eval_sentence, ref_sentence)
f1_scores.append(_f_lcs(lcs, m, n))
return np.mean(f1_scores, dtype=np.float32)
def _len_lcs(x, y):
"""Returns the length of the Longest Common Subsequence between two seqs.
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: sequence of words
y: sequence of words
  Returns:
integer: Length of LCS between x and y
"""
table = _lcs(x, y)
n, m = len(x), len(y)
return table[n, m]
def _lcs(x, y):
"""Computes the length of the LCS between two seqs.
The implementation below uses a DP programming algorithm and runs
in O(nm) time where n = len(x) and m = len(y).
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: collection of words
y: collection of words
Returns:
Table of dictionary of coord and len lcs
"""
n, m = len(x), len(y)
table = dict()
for i in range(n + 1):
for j in range(m + 1):
if i == 0 or j == 0:
table[i, j] = 0
elif x[i - 1] == y[j - 1]:
table[i, j] = table[i - 1, j - 1] + 1
else:
table[i, j] = max(table[i - 1, j], table[i, j - 1])
return table
# MASKED: _f_lcs function (lines 470-490)
|
def _f_lcs(llcs, m, n):
"""Computes the LCS-based F-measure score.
Source: http://research.microsoft.com/en-us/um/people/cyl/download/papers/
rouge-working-note-v1.3.1.pdf
Args:
llcs: Length of LCS
m: number of words in reference summary
n: number of words in candidate summary
Returns:
Float. LCS-based F-measure score
"""
r_lcs = llcs / m
p_lcs = llcs / n
beta = p_lcs / (r_lcs + 1e-12)
num = (1 + (beta ** 2)) * r_lcs * p_lcs
denom = r_lcs + ((beta ** 2) * p_lcs)
f_lcs = num / (denom + 1e-12)
return f_lcs
| 470 | 490 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for calculating loss, accuracy, and other model metrics.
Metrics:
- Padded loss, accuracy, and negative log perplexity. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/metrics.py
- BLEU approximation. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/bleu_hook.py
- ROUGE score. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/rouge.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow.compat.v1 as tf
def _pad_tensors_to_same_length(x, y):
"""Pad x and y so that the results have the same length (second dimension)."""
with tf.name_scope("pad_to_same_length"):
x_length = tf.shape(x)[1]
y_length = tf.shape(y)[1]
max_length = tf.maximum(x_length, y_length)
x = tf.pad(x, [[0, 0], [0, max_length - x_length], [0, 0]])
y = tf.pad(y, [[0, 0], [0, max_length - y_length]])
return x, y
def padded_cross_entropy_loss(logits, labels, smoothing, vocab_size):
"""Calculate cross entropy loss while ignoring padding.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
labels: Tensor of size [batch_size, length_labels]
smoothing: Label smoothing constant, used to determine the on and off values
vocab_size: int size of the vocabulary
Returns:
    The cross entropy loss and weight tensors: float32 tensors with
shape [batch_size, max(length_logits, length_labels)]
"""
with tf.name_scope("loss", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
# Calculate smoothing cross entropy
with tf.name_scope("smoothing_cross_entropy", values=[logits, labels]):
confidence = 1.0 - smoothing
low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)
soft_targets = tf.one_hot(
tf.cast(labels, tf.int32),
depth=vocab_size,
on_value=confidence,
off_value=low_confidence)
xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(
logits=logits, labels=soft_targets)
# Calculate the best (lowest) possible value of cross entropy, and
# subtract from the cross entropy loss.
normalizing_constant = -(
confidence * tf.log(confidence) + tf.to_float(vocab_size - 1) *
low_confidence * tf.log(low_confidence + 1e-20))
xentropy -= normalizing_constant
weights = tf.to_float(tf.not_equal(labels, 0))
return xentropy * weights, weights
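# Editor's note: the sketch below is not part of the original module. It is a
# hedged NumPy reference for a single token position, illustrating how the
# smoothed soft targets and the normalizing constant above fit together.
# (numpy is already imported as np at the top of this module.)
def _smoothed_xent_reference(logit_row, label_id, smoothing, vocab_size):
  """Illustrative only: label-smoothed cross entropy for one position."""
  shifted = np.asarray(logit_row, dtype=np.float64)
  shifted = shifted - np.max(shifted)
  log_probs = shifted - np.log(np.sum(np.exp(shifted)))  # log-softmax
  confidence = 1.0 - smoothing
  low_confidence = (1.0 - confidence) / float(vocab_size - 1)
  soft_targets = np.full(vocab_size, low_confidence)
  soft_targets[label_id] = confidence
  xentropy = -np.sum(soft_targets * log_probs)
  # Lowest achievable value of the smoothed cross entropy; subtracting it
  # (as padded_cross_entropy_loss does) makes a perfect prediction score ~0.
  normalizing_constant = -(
      confidence * np.log(confidence) + float(vocab_size - 1) *
      low_confidence * np.log(low_confidence + 1e-20))
  return xentropy - normalizing_constant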
def _convert_to_eval_metric(metric_fn):
"""Wrap a metric fn that returns scores and weights as an eval metric fn.
The input metric_fn returns values for the current batch. The wrapper
aggregates the return values collected over all of the batches evaluated.
Args:
metric_fn: function that returns scores and weights for the current batch's
logits and predicted labels.
Returns:
function that aggregates the scores and weights from metric_fn.
"""
def problem_metric_fn(*args):
"""Returns an aggregation of the metric_fn's returned values."""
(scores, weights) = metric_fn(*args)
# The tf.metrics.mean function assures correct aggregation.
return tf.metrics.mean(scores, weights)
return problem_metric_fn
def get_eval_metrics(logits, labels, params):
"""Return dictionary of model evaluation metrics."""
metrics = {
"accuracy": _convert_to_eval_metric(padded_accuracy)(logits, labels),
"accuracy_top5": _convert_to_eval_metric(padded_accuracy_top5)(
logits, labels),
"accuracy_per_sequence": _convert_to_eval_metric(
padded_sequence_accuracy)(logits, labels),
"neg_log_perplexity": _convert_to_eval_metric(padded_neg_log_perplexity)(
logits, labels, params["vocab_size"]),
}
if not params["use_tpu"]:
# TPU does not support tf.py_func
metrics.update({
"approx_bleu_score": _convert_to_eval_metric(
bleu_score)(logits, labels),
"rouge_2_fscore": _convert_to_eval_metric(
rouge_2_fscore)(logits, labels),
"rouge_L_fscore": _convert_to_eval_metric(
rouge_l_fscore)(logits, labels),
})
# Prefix each of the metric names with "metrics/". This allows the metric
# graphs to display under the "metrics" category in TensorBoard.
metrics = {"metrics/%s" % k: v for k, v in six.iteritems(metrics)}
return metrics
def padded_accuracy(logits, labels):
"""Percentage of times that predictions matches labels on non-0s."""
with tf.variable_scope("padded_accuracy", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
outputs = tf.to_int32(tf.argmax(logits, axis=-1))
padded_labels = tf.to_int32(labels)
return tf.to_float(tf.equal(outputs, padded_labels)), weights
def padded_accuracy_topk(logits, labels, k):
"""Percentage of times that top-k predictions matches labels on non-0s."""
with tf.variable_scope("padded_accuracy_topk", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
effective_k = tf.minimum(k, tf.shape(logits)[-1])
_, outputs = tf.nn.top_k(logits, k=effective_k)
outputs = tf.to_int32(outputs)
padded_labels = tf.to_int32(labels)
padded_labels = tf.expand_dims(padded_labels, axis=-1)
padded_labels += tf.zeros_like(outputs) # Pad to same shape.
same = tf.to_float(tf.equal(outputs, padded_labels))
same_topk = tf.reduce_sum(same, axis=-1)
return same_topk, weights
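# Editor's note: not part of the original file. A minimal NumPy illustration of
# the broadcasting trick above -- the labels are expanded to [..., 1] and tiled
# against the [..., k] top-k indices so one equality test covers all k guesses.
def _topk_match_reference(topk_indices, label_id):
  """Illustrative only: 1.0 if label_id appears among the top-k indices."""
  candidates = np.asarray(topk_indices)            # shape [k]
  expanded = np.full(candidates.shape, label_id)   # label broadcast to [k]
  return float(np.sum(candidates == expanded) > 0)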
def padded_accuracy_top5(logits, labels):
return padded_accuracy_topk(logits, labels, 5)
def padded_sequence_accuracy(logits, labels):
"""Percentage of times that predictions matches labels everywhere (non-0)."""
with tf.variable_scope("padded_sequence_accuracy", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
outputs = tf.to_int32(tf.argmax(logits, axis=-1))
padded_labels = tf.to_int32(labels)
not_correct = tf.to_float(tf.not_equal(outputs, padded_labels)) * weights
axis = list(range(1, len(outputs.get_shape())))
correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis))
return correct_seq, tf.constant(1.0)
def padded_neg_log_perplexity(logits, labels, vocab_size):
"""Average log-perplexity excluding padding 0s. No smoothing."""
num, den = padded_cross_entropy_loss(logits, labels, 0, vocab_size)
return -num, den
def bleu_score(logits, labels):
"""Approximate BLEU score computation between labels and predictions.
An approximate BLEU scoring method since we do not glue word pieces or
decode the ids and tokenize the output. By default, we use ngram order of 4
and use brevity penalty. Also, this does not have beam search.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
labels: Tensor of size [batch-size, length_labels]
Returns:
bleu: int, approx bleu score
"""
predictions = tf.to_int32(tf.argmax(logits, axis=-1))
# TODO: Look into removing use of py_func
bleu = tf.py_func(compute_bleu, (labels, predictions), tf.float32)
return bleu, tf.constant(1.0)
def _get_ngrams_with_counter(segment, max_order):
"""Extracts all n-grams up to a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
    max_order: maximum length in tokens of the n-grams returned by this
      method.
  Returns:
    The Counter containing all n-grams up to max_order in segment
    with a count of how many times each n-gram occurred.
"""
ngram_counts = collections.Counter()
for order in xrange(1, max_order + 1):
for i in xrange(0, len(segment) - order + 1):
ngram = tuple(segment[i:i + order])
ngram_counts[ngram] += 1
return ngram_counts
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
use_bp=True):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
use_bp: boolean, whether to apply brevity penalty.
Returns:
BLEU score.
"""
reference_length = 0
translation_length = 0
bp = 1.0
geo_mean = 0
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
precisions = []
for (references, translations) in zip(reference_corpus, translation_corpus):
reference_length += len(references)
translation_length += len(translations)
ref_ngram_counts = _get_ngrams_with_counter(references, max_order)
translation_ngram_counts = _get_ngrams_with_counter(translations, max_order)
overlap = dict((ngram,
min(count, translation_ngram_counts[ngram]))
for ngram, count in ref_ngram_counts.items())
for ngram in overlap:
matches_by_order[len(ngram) - 1] += overlap[ngram]
for ngram in translation_ngram_counts:
possible_matches_by_order[len(ngram) - 1] += translation_ngram_counts[
ngram]
precisions = [0] * max_order
smooth = 1.0
for i in xrange(0, max_order):
if possible_matches_by_order[i] > 0:
precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[i]
if matches_by_order[i] > 0:
precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[
i]
else:
smooth *= 2
precisions[i] = 1.0 / (smooth * possible_matches_by_order[i])
else:
precisions[i] = 0.0
if max(precisions) > 0:
p_log_sum = sum(math.log(p) for p in precisions if p)
geo_mean = math.exp(p_log_sum / max_order)
if use_bp:
ratio = translation_length / reference_length
bp = math.exp(1 - 1. / ratio) if ratio < 1.0 else 1.0
bleu = geo_mean * bp
return np.float32(bleu)
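# Editor's note: not part of the original file. A hedged usage sketch of
# compute_bleu on a toy, pre-tokenized corpus; the token strings are made up.
def _compute_bleu_example():
  """Illustrative only: BLEU of one candidate sentence against one reference."""
  reference_corpus = [["the", "cat", "sat", "on", "the", "mat"]]
  translation_corpus = [["the", "cat", "sat", "on", "mat"]]
  # The candidate is shorter than the reference, so with use_bp=True the
  # brevity penalty pulls the n-gram precision score down.
  return compute_bleu(reference_corpus, translation_corpus, max_order=4)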
def rouge_2_fscore(logits, labels):
"""ROUGE-2 F1 score computation between labels and predictions.
This is an approximate ROUGE scoring method since we do not glue word pieces
or decode the ids and tokenize the output.
Args:
logits: tensor, model predictions
labels: tensor, gold output.
Returns:
rouge2_fscore: approx rouge-2 f1 score.
"""
predictions = tf.to_int32(tf.argmax(logits, axis=-1))
# TODO: Look into removing use of py_func
rouge_2_f_score = tf.py_func(rouge_n, (predictions, labels), tf.float32)
return rouge_2_f_score, tf.constant(1.0)
def _get_ngrams(n, text):
"""Calculates n-grams.
Args:
n: which n-grams to calculate
text: An array of tokens
Returns:
A set of n-grams
"""
ngram_set = set()
text_length = len(text)
max_index_ngram_start = text_length - n
for i in range(max_index_ngram_start + 1):
ngram_set.add(tuple(text[i:i + n]))
return ngram_set
def rouge_n(eval_sentences, ref_sentences, n=2):
"""Computes ROUGE-N f1 score of two text collections of sentences.
Source: https://www.microsoft.com/en-us/research/publication/
rouge-a-package-for-automatic-evaluation-of-summaries/
Args:
eval_sentences: Predicted sentences.
ref_sentences: Sentences from the reference set
n: Size of ngram. Defaults to 2.
Returns:
f1 score for ROUGE-N
"""
f1_scores = []
for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):
eval_ngrams = _get_ngrams(n, eval_sentence)
ref_ngrams = _get_ngrams(n, ref_sentence)
ref_count = len(ref_ngrams)
eval_count = len(eval_ngrams)
# Count the overlapping ngrams between evaluated and reference
overlapping_ngrams = eval_ngrams.intersection(ref_ngrams)
overlapping_count = len(overlapping_ngrams)
# Handle edge case. This isn't mathematically correct, but it's good enough
if eval_count == 0:
precision = 0.0
else:
precision = float(overlapping_count) / eval_count
if ref_count == 0:
recall = 0.0
else:
recall = float(overlapping_count) / ref_count
f1_scores.append(2.0 * ((precision * recall) / (precision + recall + 1e-8)))
# return overlapping_count / reference_count
return np.mean(f1_scores, dtype=np.float32)
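# Editor's note: not part of the original file. A hedged usage sketch of
# rouge_n; the inputs are toy token-id sequences, but any hashable tokens work.
def _rouge_n_example():
  """Illustrative only: ROUGE-2 F1 for a single candidate/reference pair."""
  eval_sentences = [[1, 2, 3, 4, 5]]
  ref_sentences = [[1, 2, 3, 6, 5]]
  # Two of the four bigrams on each side overlap, so the F1 is roughly 0.5.
  return rouge_n(eval_sentences, ref_sentences, n=2)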
def rouge_l_fscore(predictions, labels):
"""ROUGE scores computation between labels and predictions.
This is an approximate ROUGE scoring method since we do not glue word pieces
or decode the ids and tokenize the output.
Args:
predictions: tensor, model predictions
labels: tensor, gold output.
Returns:
rouge_l_fscore: approx rouge-l f1 score.
"""
outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
rouge_l_f_score = tf.py_func(rouge_l_sentence_level, (outputs, labels),
tf.float32)
return rouge_l_f_score, tf.constant(1.0)
def rouge_l_sentence_level(eval_sentences, ref_sentences):
"""Computes ROUGE-L (sentence level) of two collections of sentences.
Source: https://www.microsoft.com/en-us/research/publication/
rouge-a-package-for-automatic-evaluation-of-summaries/
Calculated according to:
R_lcs = LCS(X,Y)/m
P_lcs = LCS(X,Y)/n
F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)
where:
X = reference summary
Y = Candidate summary
m = length of reference summary
n = length of candidate summary
Args:
eval_sentences: The sentences that have been picked by the summarizer
ref_sentences: The sentences from the reference set
Returns:
A float: F_lcs
"""
f1_scores = []
for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):
m = float(len(ref_sentence))
n = float(len(eval_sentence))
lcs = _len_lcs(eval_sentence, ref_sentence)
f1_scores.append(_f_lcs(lcs, m, n))
return np.mean(f1_scores, dtype=np.float32)
def _len_lcs(x, y):
"""Returns the length of the Longest Common Subsequence between two seqs.
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: sequence of words
y: sequence of words
  Returns:
integer: Length of LCS between x and y
"""
table = _lcs(x, y)
n, m = len(x), len(y)
return table[n, m]
def _lcs(x, y):
"""Computes the length of the LCS between two seqs.
The implementation below uses a DP programming algorithm and runs
in O(nm) time where n = len(x) and m = len(y).
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: collection of words
y: collection of words
Returns:
Table of dictionary of coord and len lcs
"""
n, m = len(x), len(y)
table = dict()
for i in range(n + 1):
for j in range(m + 1):
if i == 0 or j == 0:
table[i, j] = 0
elif x[i - 1] == y[j - 1]:
table[i, j] = table[i - 1, j - 1] + 1
else:
table[i, j] = max(table[i - 1, j], table[i, j - 1])
return table
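# Editor's note: not part of the original file. A small worked example of the
# table built by _lcs, offered as a sketch rather than a definitive test.
def _lcs_example():
  """Illustrative only: LCS length of two short token sequences."""
  x = ["police", "killed", "the", "gunman"]
  y = ["police", "kill", "the", "gunman"]
  table = _lcs(x, y)
  # ("police", "the", "gunman") is the longest common subsequence, so the
  # bottom-right cell table[len(x), len(y)] holds 3.
  return table[len(x), len(y)]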
def _f_lcs(llcs, m, n):
"""Computes the LCS-based F-measure score.
Source: http://research.microsoft.com/en-us/um/people/cyl/download/papers/
rouge-working-note-v1.3.1.pdf
Args:
llcs: Length of LCS
m: number of words in reference summary
n: number of words in candidate summary
Returns:
Float. LCS-based F-measure score
"""
r_lcs = llcs / m
p_lcs = llcs / n
beta = p_lcs / (r_lcs + 1e-12)
num = (1 + (beta ** 2)) * r_lcs * p_lcs
denom = r_lcs + ((beta ** 2) * p_lcs)
f_lcs = num / (denom + 1e-12)
return f_lcs
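# Editor's note: not part of the original file. A hedged numeric walk-through
# of _f_lcs, tying it back to the R_lcs / P_lcs / F_lcs formulas quoted in the
# rouge_l_sentence_level docstring above.
def _f_lcs_example():
  """Illustrative only: F_lcs for llcs=3 with 4 reference and 4 candidate words."""
  # R_lcs = 3/4 and P_lcs = 3/4, so beta is ~1, the formula reduces to a
  # harmonic mean, and the result is close to 0.75.
  return _f_lcs(3.0, 4.0, 4.0)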
|
text_to_sequence
|
Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
The text can optionally have ARPAbet sequences enclosed in curly braces embedded
in it. For example, "Turn left on {HH AW1 S S T AH0 N} Street."
Args:
text: string to convert to a sequence
cleaner_names: names of the cleaner functions to run the text through
Returns:
List of integers corresponding to the symbols in the text
|
# -*- coding: utf-8 -*-
import re
from packaging import version
import phonemizer
from phonemizer.phonemize import phonemize
from TTS.utils.text import cleaners
from TTS.utils.text.symbols import make_symbols, symbols, phonemes, _phoneme_punctuations, _bos, \
_eos
# Mappings from symbol to numeric ID and vice versa:
_symbol_to_id = {s: i for i, s in enumerate(symbols)}
_id_to_symbol = {i: s for i, s in enumerate(symbols)}
_phonemes_to_id = {s: i for i, s in enumerate(phonemes)}
_id_to_phonemes = {i: s for i, s in enumerate(phonemes)}
# Regular expression matching text enclosed in curly braces:
_CURLY_RE = re.compile(r'(.*?)\{(.+?)\}(.*)')
# Regular expression matching punctuations, ignoring empty space
PHONEME_PUNCTUATION_PATTERN = r'['+_phoneme_punctuations+']+'
def text2phone(text, language):
'''
Convert graphemes to phonemes.
'''
seperator = phonemizer.separator.Separator(' |', '', '|')
#try:
punctuations = re.findall(PHONEME_PUNCTUATION_PATTERN, text)
if version.parse(phonemizer.__version__) < version.parse('2.1'):
ph = phonemize(text, separator=seperator, strip=False, njobs=1, backend='espeak', language=language)
ph = ph[:-1].strip() # skip the last empty character
# phonemizer does not tackle punctuations. Here we do.
# Replace \n with matching punctuations.
if punctuations:
# if text ends with a punctuation.
if text[-1] == punctuations[-1]:
for punct in punctuations[:-1]:
ph = ph.replace('| |\n', '|'+punct+'| |', 1)
ph = ph + punctuations[-1]
else:
for punct in punctuations:
ph = ph.replace('| |\n', '|'+punct+'| |', 1)
elif version.parse(phonemizer.__version__) >= version.parse('2.1'):
ph = phonemize(text, separator=seperator, strip=False, njobs=1, backend='espeak', language=language, preserve_punctuation=True)
# this is a simple fix for phonemizer.
# https://github.com/bootphon/phonemizer/issues/32
if punctuations:
for punctuation in punctuations:
ph = ph.replace(f"| |{punctuation} ", f"|{punctuation}| |").replace(f"| |{punctuation}", f"|{punctuation}| |")
ph = ph[:-3]
else:
raise RuntimeError(" [!] Use 'phonemizer' version 2.1 or older.")
return ph
def pad_with_eos_bos(phoneme_sequence, tp=None):
# pylint: disable=global-statement
global _phonemes_to_id, _bos, _eos
if tp:
_bos = tp['bos']
_eos = tp['eos']
_, _phonemes = make_symbols(**tp)
_phonemes_to_id = {s: i for i, s in enumerate(_phonemes)}
return [_phonemes_to_id[_bos]] + list(phoneme_sequence) + [_phonemes_to_id[_eos]]
def phoneme_to_sequence(text, cleaner_names, language, enable_eos_bos=False, tp=None):
# pylint: disable=global-statement
global _phonemes_to_id
if tp:
_, _phonemes = make_symbols(**tp)
_phonemes_to_id = {s: i for i, s in enumerate(_phonemes)}
sequence = []
text = text.replace(":", "")
clean_text = _clean_text(text, cleaner_names)
to_phonemes = text2phone(clean_text, language)
if to_phonemes is None:
print("!! After phoneme conversion the result is None. -- {} ".format(clean_text))
    # iterate over phonemes, skipping empty strings - NOTE: keeping the empty strings might give better intonation.
for phoneme in filter(None, to_phonemes.split('|')):
sequence += _phoneme_to_sequence(phoneme)
    # Add BOS/EOS chars if enabled
if enable_eos_bos:
sequence = pad_with_eos_bos(sequence, tp=tp)
return sequence
def sequence_to_phoneme(sequence, tp=None):
# pylint: disable=global-statement
'''Converts a sequence of IDs back to a string'''
global _id_to_phonemes
result = ''
if tp:
_, _phonemes = make_symbols(**tp)
_id_to_phonemes = {i: s for i, s in enumerate(_phonemes)}
for symbol_id in sequence:
if symbol_id in _id_to_phonemes:
s = _id_to_phonemes[symbol_id]
result += s
return result.replace('}{', ' ')
# MASKED: text_to_sequence function (lines 110-140)
def sequence_to_text(sequence, tp=None):
'''Converts a sequence of IDs back to a string'''
# pylint: disable=global-statement
global _id_to_symbol
if tp:
_symbols, _ = make_symbols(**tp)
_id_to_symbol = {i: s for i, s in enumerate(_symbols)}
result = ''
for symbol_id in sequence:
if symbol_id in _id_to_symbol:
s = _id_to_symbol[symbol_id]
# Enclose ARPAbet back in curly braces:
if len(s) > 1 and s[0] == '@':
s = '{%s}' % s[1:]
result += s
return result.replace('}{', ' ')
def _clean_text(text, cleaner_names):
for name in cleaner_names:
cleaner = getattr(cleaners, name)
if not cleaner:
raise Exception('Unknown cleaner: %s' % name)
text = cleaner(text)
return text
def _symbols_to_sequence(syms):
return [_symbol_to_id[s] for s in syms if _should_keep_symbol(s)]
def _phoneme_to_sequence(phons):
return [_phonemes_to_id[s] for s in list(phons) if _should_keep_phoneme(s)]
def _arpabet_to_sequence(text):
return _symbols_to_sequence(['@' + s for s in text.split()])
def _should_keep_symbol(s):
return s in _symbol_to_id and s not in ['~', '^', '_']
def _should_keep_phoneme(p):
return p in _phonemes_to_id and p not in ['~', '^', '_']
|
def text_to_sequence(text, cleaner_names, tp=None):
'''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
The text can optionally have ARPAbet sequences enclosed in curly braces embedded
in it. For example, "Turn left on {HH AW1 S S T AH0 N} Street."
Args:
text: string to convert to a sequence
cleaner_names: names of the cleaner functions to run the text through
Returns:
List of integers corresponding to the symbols in the text
'''
# pylint: disable=global-statement
global _symbol_to_id
if tp:
_symbols, _ = make_symbols(**tp)
_symbol_to_id = {s: i for i, s in enumerate(_symbols)}
sequence = []
# Check for curly braces and treat their contents as ARPAbet:
while text:
m = _CURLY_RE.match(text)
if not m:
sequence += _symbols_to_sequence(_clean_text(text, cleaner_names))
break
sequence += _symbols_to_sequence(
_clean_text(m.group(1), cleaner_names))
sequence += _arpabet_to_sequence(m.group(2))
text = m.group(3)
return sequence
| 110 | 140 |
# -*- coding: utf-8 -*-
import re
from packaging import version
import phonemizer
from phonemizer.phonemize import phonemize
from TTS.utils.text import cleaners
from TTS.utils.text.symbols import make_symbols, symbols, phonemes, _phoneme_punctuations, _bos, \
_eos
# Mappings from symbol to numeric ID and vice versa:
_symbol_to_id = {s: i for i, s in enumerate(symbols)}
_id_to_symbol = {i: s for i, s in enumerate(symbols)}
_phonemes_to_id = {s: i for i, s in enumerate(phonemes)}
_id_to_phonemes = {i: s for i, s in enumerate(phonemes)}
# Regular expression matching text enclosed in curly braces:
_CURLY_RE = re.compile(r'(.*?)\{(.+?)\}(.*)')
# Regular expression matching punctuations, ignoring empty space
PHONEME_PUNCTUATION_PATTERN = r'['+_phoneme_punctuations+']+'
def text2phone(text, language):
'''
Convert graphemes to phonemes.
'''
seperator = phonemizer.separator.Separator(' |', '', '|')
#try:
punctuations = re.findall(PHONEME_PUNCTUATION_PATTERN, text)
if version.parse(phonemizer.__version__) < version.parse('2.1'):
ph = phonemize(text, separator=seperator, strip=False, njobs=1, backend='espeak', language=language)
ph = ph[:-1].strip() # skip the last empty character
# phonemizer does not tackle punctuations. Here we do.
# Replace \n with matching punctuations.
if punctuations:
# if text ends with a punctuation.
if text[-1] == punctuations[-1]:
for punct in punctuations[:-1]:
ph = ph.replace('| |\n', '|'+punct+'| |', 1)
ph = ph + punctuations[-1]
else:
for punct in punctuations:
ph = ph.replace('| |\n', '|'+punct+'| |', 1)
elif version.parse(phonemizer.__version__) >= version.parse('2.1'):
ph = phonemize(text, separator=seperator, strip=False, njobs=1, backend='espeak', language=language, preserve_punctuation=True)
# this is a simple fix for phonemizer.
# https://github.com/bootphon/phonemizer/issues/32
if punctuations:
for punctuation in punctuations:
ph = ph.replace(f"| |{punctuation} ", f"|{punctuation}| |").replace(f"| |{punctuation}", f"|{punctuation}| |")
ph = ph[:-3]
else:
raise RuntimeError(" [!] Use 'phonemizer' version 2.1 or older.")
return ph
def pad_with_eos_bos(phoneme_sequence, tp=None):
# pylint: disable=global-statement
global _phonemes_to_id, _bos, _eos
if tp:
_bos = tp['bos']
_eos = tp['eos']
_, _phonemes = make_symbols(**tp)
_phonemes_to_id = {s: i for i, s in enumerate(_phonemes)}
return [_phonemes_to_id[_bos]] + list(phoneme_sequence) + [_phonemes_to_id[_eos]]
def phoneme_to_sequence(text, cleaner_names, language, enable_eos_bos=False, tp=None):
# pylint: disable=global-statement
global _phonemes_to_id
if tp:
_, _phonemes = make_symbols(**tp)
_phonemes_to_id = {s: i for i, s in enumerate(_phonemes)}
sequence = []
text = text.replace(":", "")
clean_text = _clean_text(text, cleaner_names)
to_phonemes = text2phone(clean_text, language)
if to_phonemes is None:
print("!! After phoneme conversion the result is None. -- {} ".format(clean_text))
    # iterate over phonemes, skipping empty strings - NOTE: keeping the empty strings might give better intonation.
for phoneme in filter(None, to_phonemes.split('|')):
sequence += _phoneme_to_sequence(phoneme)
    # Add BOS/EOS chars if enabled
if enable_eos_bos:
sequence = pad_with_eos_bos(sequence, tp=tp)
return sequence
def sequence_to_phoneme(sequence, tp=None):
# pylint: disable=global-statement
'''Converts a sequence of IDs back to a string'''
global _id_to_phonemes
result = ''
if tp:
_, _phonemes = make_symbols(**tp)
_id_to_phonemes = {i: s for i, s in enumerate(_phonemes)}
for symbol_id in sequence:
if symbol_id in _id_to_phonemes:
s = _id_to_phonemes[symbol_id]
result += s
return result.replace('}{', ' ')
def text_to_sequence(text, cleaner_names, tp=None):
'''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
The text can optionally have ARPAbet sequences enclosed in curly braces embedded
in it. For example, "Turn left on {HH AW1 S S T AH0 N} Street."
Args:
text: string to convert to a sequence
cleaner_names: names of the cleaner functions to run the text through
Returns:
List of integers corresponding to the symbols in the text
'''
# pylint: disable=global-statement
global _symbol_to_id
if tp:
_symbols, _ = make_symbols(**tp)
_symbol_to_id = {s: i for i, s in enumerate(_symbols)}
sequence = []
# Check for curly braces and treat their contents as ARPAbet:
while text:
m = _CURLY_RE.match(text)
if not m:
sequence += _symbols_to_sequence(_clean_text(text, cleaner_names))
break
sequence += _symbols_to_sequence(
_clean_text(m.group(1), cleaner_names))
sequence += _arpabet_to_sequence(m.group(2))
text = m.group(3)
return sequence
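# Editor's note: not part of the original file. A hedged usage sketch of
# text_to_sequence; 'english_cleaners' is assumed to be one of the cleaner
# functions available in TTS.utils.text.cleaners -- substitute whatever
# cleaner names your configuration actually uses.
def _text_to_sequence_example():
    '''Illustrative only: mixed grapheme/ARPAbet input from the docstring above.'''
    text = "Turn left on {HH AW1 S S T AH0 N} Street."
    # The curly-brace span is mapped through the '@'-prefixed ARPAbet symbols;
    # the rest of the text is cleaned and mapped through _symbol_to_id.
    return text_to_sequence(text, ["english_cleaners"])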
def sequence_to_text(sequence, tp=None):
'''Converts a sequence of IDs back to a string'''
# pylint: disable=global-statement
global _id_to_symbol
if tp:
_symbols, _ = make_symbols(**tp)
_id_to_symbol = {i: s for i, s in enumerate(_symbols)}
result = ''
for symbol_id in sequence:
if symbol_id in _id_to_symbol:
s = _id_to_symbol[symbol_id]
# Enclose ARPAbet back in curly braces:
if len(s) > 1 and s[0] == '@':
s = '{%s}' % s[1:]
result += s
return result.replace('}{', ' ')
def _clean_text(text, cleaner_names):
for name in cleaner_names:
cleaner = getattr(cleaners, name)
if not cleaner:
raise Exception('Unknown cleaner: %s' % name)
text = cleaner(text)
return text
def _symbols_to_sequence(syms):
return [_symbol_to_id[s] for s in syms if _should_keep_symbol(s)]
def _phoneme_to_sequence(phons):
return [_phonemes_to_id[s] for s in list(phons) if _should_keep_phoneme(s)]
def _arpabet_to_sequence(text):
return _symbols_to_sequence(['@' + s for s in text.split()])
def _should_keep_symbol(s):
return s in _symbol_to_id and s not in ['~', '^', '_']
def _should_keep_phoneme(p):
return p in _phonemes_to_id and p not in ['~', '^', '_']
|
plot_contours
|
Plot the decision boundaries for a classifier.
Parameters
----------
ax: matplotlib axes object
clf: a classifier
xx: meshgrid ndarray
yy: meshgrid ndarray
params: dictionary of params to pass to contourf, optional
|
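The docstring above describes the usual scikit-learn-style helper for visualising a classifier's decision regions. A minimal sketch consistent with that description is given below; it is written here for illustration, is not necessarily the masked implementation, and the name plot_contours_sketch is made up.
import numpy as np
def plot_contours_sketch(ax, clf, xx, yy, **params):
    # Predict a class for every meshgrid point, reshape the predictions back
    # onto the grid, and draw filled contours on the supplied axes.
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    out = ax.contourf(xx, yy, Z, **params)
    return out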
import cv2.cv2 as cv2
import skimage.io as io
from skimage.transform import downscale_local_mean
import numpy as np
from model import *
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from images_to_arr import *
import pickle
import csv
def removeBackground(img_in):
Img_backless = np.copy(img_in)
Img_backless = np.subtract(np.multiply(Img_backless,1.11),0.11)
Img_backless[Img_backless < 0] = 0
return Img_backless
def newBBcoords(img_pred_Log,test_image):
# returns coordinates of the bounding box for the region with the largest area
kernel_ones = np.ones([3,3],np.uint8)
closing_Log = cv2.morphologyEx(img_pred_Log, cv2.MORPH_CLOSE, kernel_ones)
labelsLog, numLog = label(closing_Log, neighbors=8, background = 0, return_num = True)
regionsLog = regionprops(labelsLog)
areasLog = [region['area'] for region in regionsLog]
areasLogArr = np.array(areasLog)
maxIndex = np.argmax(areasLogArr)
value = labelsLog[regionsLog[maxIndex]['coords'][0][0],regionsLog[maxIndex]['coords'][0][1]]
labelsLog[labelsLog != value] = 0
labelsLog[labelsLog == value] = 1
labelsImg = np.multiply(np.array(labelsLog, np.uint8),255)
#myShowImage(labelsImg)
sizeBoxX = regionsLog[maxIndex]['bbox'][3]-regionsLog[maxIndex]['bbox'][1]
sizeBoxY = regionsLog[maxIndex]['bbox'][2]-regionsLog[maxIndex]['bbox'][0]
coordsBbox = list(regionsLog[maxIndex]['bbox'])
if sizeBoxX <= 0.5 * img_pred_Log.shape[1]:
newSizeBoxX = 0.3 / (sizeBoxX / img_pred_Log.shape[1])
coordsBbox[1] = coordsBbox[1] - sizeBoxX*(0.5*(newSizeBoxX-1))
coordsBbox[3] = coordsBbox[3] + sizeBoxX*(0.5*(newSizeBoxX-1))
if sizeBoxY <= 0.5 * img_pred_Log.shape[0]:
newSizeBoxY = 0.5 / (sizeBoxY / img_pred_Log.shape[0])
coordsBbox[0] = coordsBbox[0] - sizeBoxY*(0.5*(newSizeBoxY-1))
coordsBbox[2] = coordsBbox[2] + sizeBoxY*(0.5*(newSizeBoxY-1))
if coordsBbox[0] < 0:
coordsBbox[0] = 0
if coordsBbox[1] < 0:
coordsBbox[1] = 0
if coordsBbox[2] > test_image.shape[0]:
coordsBbox[2] = test_image.shape[0] - 1
if coordsBbox[3] > test_image.shape[1]:
coordsBbox[3] = test_image.shape[1] - 1
coordsBboxInt = [round(x) for x in coordsBbox]
return coordsBboxInt
def getLargestAreaEcentroid(img_pred_Log):
# returns mask with the regions with the largest area, coords of centroid and radius
kernel_ones = np.ones([3,3],np.uint8)
closing_Log = cv2.morphologyEx(img_pred_Log, cv2.MORPH_CLOSE, kernel_ones)
labelsLog, numLog = label(closing_Log, neighbors=8, background = 0, return_num = True)
regionsLog = regionprops(labelsLog)
areasLog = [region['area'] for region in regionsLog]
areasLogArr = np.array(areasLog)
maxIndex = np.argmax(areasLogArr)
value = labelsLog[regionsLog[maxIndex]['coords'][0][0],regionsLog[maxIndex]['coords'][0][1]]
labelsLog[labelsLog != value] = 0
labelsLog[labelsLog == value] = 1
centreCoords = np.round(regionsLog[maxIndex]['centroid'])
centreCoords = centreCoords.astype(np.uint)
radius = (regionsLog[maxIndex]['major_axis_length'] + regionsLog[maxIndex]['minor_axis_length']) / 4
colsCoord = [regionsLog[maxIndex]['bbox'][1],regionsLog[maxIndex]['bbox'][3]]
labelsArr = np.array(labelsLog)
return labelsArr, centreCoords, radius, colsCoord
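# Editor's note: not part of the original script. A hedged sketch of how the
# region helper above is typically exercised on a binary prediction mask; the
# toy mask is made up, and label/regionprops are assumed to be in scope via
# the wildcard imports at the top of the file.
def _largest_region_demo():
    toy_mask = np.zeros((64, 64), np.uint8)
    toy_mask[10:30, 10:30] = 1   # large square blob -> selected region
    toy_mask[50:54, 50:54] = 1   # small distractor blob -> discarded
    labelsArr, centreCoords, radius, colsCoord = getLargestAreaEcentroid(toy_mask)
    return centreCoords, radius, colsCoord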
image_arr = np.load('image_arr.npy')
mask_arr = np.load('mask_arr.npy')
image_arr_red_channels = np.load('image_arr_red_channels.npy')
image_arr_green_channels = np.load('image_arr_green_channels.npy')
image_arr_blue_channels = np.load('image_arr_blue_channels.npy')
entropy = np.load('entropy_arr.npy')
elips = np.load('elips_arr.npy')
vessels = np.load('vessels_arr.npy')
test_image = np.zeros(image_arr[0].shape)
test_image_mask = np.zeros(mask_arr[0].shape)
test_img_RC = np.zeros(image_arr[0].shape)
test_img_GC = np.zeros(image_arr[0].shape)
test_img_BC = np.zeros(image_arr[0].shape)
entropy_arr = np.zeros(image_arr[0].shape)
elips_arr = np.zeros(image_arr[0].shape)
ODROILog = []
ODROIBay = []
getClassifiers = False
if getClassifiers:
X_train = np.zeros([image_arr[0].shape[0]*image_arr[0].shape[1]*40,4])
Y_train = np.zeros([image_arr[0].shape[0]*image_arr[0].shape[1]*40,1])
for j in range(0,40):
for i in range(0,40): # Get train data
if i == j:
continue
test_image = image_arr[i]
test_image_mask = mask_arr[i]
labels, num = label(test_image_mask, neighbors=8, background = 0, return_num = True)
regions = regionprops(labels)
centreCoords = np.round(regions[0]['centroid'])
centreCoords = centreCoords.astype(np.uint)
centreMask = np.zeros(test_image_mask.shape)
centreMask[centreCoords[0],centreCoords[1]] = 1
#Change here!
#test_image_mask = centreMask
test_image_RC = image_arr_red_channels[i]
test_image_GC = image_arr_green_channels[i]
test_image_BC = image_arr_blue_channels[i]
entropy_arr = entropy[i]
elips_arr = elips[i]
#test_image_RC = removeBackground(test_image_RC)
#test_image = removeBackground(test_image)
imageIndxs = np.where(test_image != 0)
intensityColumn_Arr = np.squeeze(test_image.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
intensityColumn_Arr = (intensityColumn_Arr-np.average(intensityColumn_Arr)) / np.std(intensityColumn_Arr)
redChannel_Arr = np.squeeze(test_image_RC.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
redChannel_Arr = (redChannel_Arr-np.average(redChannel_Arr)) / np.std(redChannel_Arr)
entropy_arr = np.squeeze(entropy_arr.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
#entropy_arr = (entropy_arr-np.average(entropy_arr)) / np.std(entropy_arr)
# Distance Array
indices_Arr = np.indices((test_image.shape[0],test_image.shape[1])).transpose((1,2,0))
centreCoords = np.array([test_image.shape[0]/2,test_image.shape[1]/2])
distance_Arr = np.sqrt(np.add(np.power(indices_Arr[...,0]-centreCoords[0],2),np.power(indices_Arr[...,1]-centreCoords[1],2)))
normDistance_Arr = distance_Arr / np.max(distance_Arr)
normDistanceColumn_Arr = np.squeeze(normDistance_Arr.reshape([1,normDistance_Arr.shape[0]*normDistance_Arr.shape[1]])).T
X_train[i*image_arr[0].shape[0]*image_arr[0].shape[1]:(i+1)*image_arr[0].shape[0]*image_arr[0].shape[1],...] = np.column_stack((redChannel_Arr,entropy_arr,normDistanceColumn_Arr, intensityColumn_Arr))#,
Y_train[i*image_arr[0].shape[0]*image_arr[0].shape[1]:(i+1)*image_arr[0].shape[0]*image_arr[0].shape[1],0] = np.squeeze(test_image_mask.reshape([1,test_image_mask.shape[0]*test_image_mask.shape[1]])).T
X_train_2 = X_train
y_train_2 = Y_train
clf_bayes = GaussianNB()
clf_bayes.fit(X_train_2,y_train_2)
paramsBayes = clf_bayes.get_params
# Logistic regression
clf_log = LogisticRegression()
clf_log.fit(X_train_2,y_train_2)
log = open('Classifiers/Log/LogClf_excluding_' + str(j) + '.pickle', 'wb')
pickle.dump(clf_log, log)
log.close()
bay = open('Classifiers/Bay/BayClf_excluding_' + str(j) + '.pickle', 'wb')
pickle.dump(clf_bayes, bay)
bay.close()
'''
f = open('my_classifier.pickle', 'rb')
classifier = pickle.load(f)
f.close()
'''
test_image2 = np.zeros(image_arr[0].shape)
test_image_mask2 = np.zeros(mask_arr[0].shape)
test_img_RC2 = np.zeros(image_arr[0].shape)
# test_img_GC2 = np.zeros(image_arr[0].shape)
test_image2 = image_arr[j]
test_image_mask2 = mask_arr[j]
test_image_RC2 = image_arr_red_channels[j]
test_image_GC2 = image_arr_green_channels[j]
test_image_BC2 = image_arr_blue_channels[j]
entropy_arr2 = entropy[j]
intensityColumn_Arr2 = np.squeeze(test_image2.reshape([1,test_image2.shape[0]*test_image2.shape[1]])).T
intensityColumn_Arr2 = (intensityColumn_Arr2-np.average(intensityColumn_Arr2)) / np.std(intensityColumn_Arr2)
redChannel_Arr2 = np.squeeze(test_image_RC2.reshape([1,test_image2.shape[0]*test_image2.shape[1]])).T
redChannel_Arr2 = ( redChannel_Arr2 - np.average(redChannel_Arr2) ) / np.std(redChannel_Arr2)
entropy_arr = np.squeeze(entropy_arr2.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
X_val = np.column_stack((redChannel_Arr2,entropy_arr,normDistanceColumn_Arr,intensityColumn_Arr2))#,,greenChannel_Arr2))
Y_val = np.squeeze(test_image_mask2.reshape([1,test_image_mask2.shape[0]*test_image_mask2.shape[1]])).T
# predicts
predictsBayes = clf_bayes.predict(X_val)
predictsLog = clf_log.predict(X_val)
img_pred_Log = predictsLog.reshape([test_image.shape[0],test_image.shape[1]])
img_pred_Bayes = predictsBayes.reshape([test_image.shape[0],test_image.shape[1]])
# Y_train_reshaped = Y_train.reshape([test_image.shape[0],test_image.shape[1]])
#myShowImage(img_pred_Log,"img_pred_Log_" + str(j))
#myShowImage(img_pred_Bayes,"img_pred_Bayes_" + str(j))
try:
coordsBBLog = newBBcoords(img_pred_Log,test_image)
except:
coordsBBLog = []
try:
coordsBBBay = newBBcoords(img_pred_Bayes,test_image)
except:
coordsBBBay = []
ODROILog.append(coordsBBLog)
ODROIBay.append(coordsBBBay)
ODROILog_Arr = np.array(ODROILog)
ODROIBay_Arr = np.array(ODROIBay)
np.save('ODROILog_Arr.npy',ODROILog_Arr)
np.save('ODROIBay_Arr.npy',ODROIBay_Arr)
prepareSegments = False
if prepareSegments:
ODROILog_Arr = np.load('ODROILog_Arr.npy')
ODROIBay_Arr = np.load('ODROIBay_Arr.npy')
OD_section = []
OD_mask = []
OD_section_RC = []
lenX_Arr = 0
for i in range(0,40):
try:
coords = ODROILog_Arr[i]
#myShowImage(image_arr[i][coords[0]:coords[2],coords[1]:coords[3]],"LOG" +str(i))
segMask = np.array(mask_arr[i][coords[0]:coords[2],coords[1]:coords[3]])
segRC = np.array(image_arr_red_channels[i][coords[0]:coords[2],coords[1]:coords[3]])
imgSegment = np.array(image_arr[i][coords[0]:coords[2],coords[1]:coords[3]])
vesslesSeg = np.array(vessels[i][coords[0]:coords[2],coords[1]:coords[3]])
kernel_ones = np.ones([3,3],np.uint8)
vesslesSeg = cv2.morphologyEx(vesslesSeg, cv2.MORPH_DILATE, kernel_ones)
indxsVesl = np.where(vesslesSeg != 0)
medianFiltered = median(imgSegment,disk(25))
maxFiltered = maximum_filter(imgSegment, size=15)
smoothVessels = np.copy(imgSegment)
smoothVessels[indxsVesl[0],indxsVesl[1]] = np.multiply(maxFiltered[indxsVesl[0],indxsVesl[1]],0.97)
#smoothDisk = mean(smoothVessels, disk(5))
OD_section.append(smoothVessels)
OD_mask.append(segMask)
OD_section_RC.append(segRC)
lenX_Arr = lenX_Arr + (imgSegment.shape[0]*imgSegment.shape[1])
#coords = ODROIBay_Arr[i]
#myShowImage(image_arr[i][coords[0]:coords[2],coords[1]:coords[3]],"BAY" + str(i))
except:
coords = ODROIBay_Arr[i]
segMask = np.array(mask_arr[i][coords[0]:coords[2],coords[1]:coords[3]])
segRC = np.array(image_arr_red_channels[i][coords[0]:coords[2],coords[1]:coords[3]])
imgSegment = np.array(image_arr[i][coords[0]:coords[2],coords[1]:coords[3]])
vesslesSeg = np.array(vessels[i][coords[0]:coords[2],coords[1]:coords[3]])
kernel_ones = np.ones([3,3],np.uint8)
vesslesSeg = cv2.morphologyEx(vesslesSeg, cv2.MORPH_DILATE, kernel_ones)
indxsVesl = np.where(vesslesSeg != 0)
#medianFiltered = median(imgSegment,disk(25))
maxFiltered = maximum_filter(imgSegment, size=15)
smoothVessels = np.copy(imgSegment)
smoothVessels[indxsVesl[0],indxsVesl[1]] = np.multiply(maxFiltered[indxsVesl[0],indxsVesl[1]],0.97)
#myShowImage(image_arr[i][coords[0]:coords[2],coords[1]:coords[3]],"EXCEPT" + str(i))
OD_section.append(smoothVessels)
OD_mask.append(segMask)
OD_section_RC.append(segRC)
#print('except')
lenX_Arr = lenX_Arr + (imgSegment.shape[0]*imgSegment.shape[1])
#myShowImage(smoothVessels)
OD_section_Arr = np.array(OD_section)
OD_mask_Arr = np.array(OD_mask)
OD_section_RC = np.array(OD_section_RC)
np.save('OD_section_Arr.npy',OD_section_Arr)
np.save('OD_mask_Arr.npy',OD_mask_Arr)
np.save('OD_section_RC.npy',OD_section_RC)
print(lenX_Arr) # len = 4577126
finalSegmentation = False
finalMaskPredicts = []
if finalSegmentation:
OD_section_Arr = np.load('OD_section_Arr.npy')
OD_mask_Arr = np.load('OD_mask_Arr.npy')
OD_section_RC = np.load('OD_section_RC.npy')
clahe = cv2.createCLAHE(clipLimit=1, tileGridSize=(8, 8))
for j in range(0,40):
removeLen = OD_section_Arr[j].shape[0] * OD_section_Arr[j].shape[1]
X_train = np.zeros([4577126-removeLen,2])
Y_train = np.zeros([4577126-removeLen,1])
for i in range(0,40):
if i == j:
continue
test_image = OD_section_Arr[i]
test_image_mask = OD_mask_Arr[i]
segRC = OD_section_RC[i]
clahePrep = np.multiply(np.copy(test_image),255)
clahePrep = clahePrep.astype(np.uint8)
highContrast = clahe.apply(clahePrep)
intensityColumn_Arr = np.squeeze(test_image.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
intensityColumn_Arr = (intensityColumn_Arr-np.average(intensityColumn_Arr)) / np.std(intensityColumn_Arr)
segRC = np.squeeze(segRC.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
#segRC = (segRC-np.average(segRC)) / np.std(segRC)
if (i-1)*test_image.shape[0]*test_image.shape[1] < 0 and (i)*test_image.shape[0]*test_image.shape[1] == 0:
X_train[(i-1)*test_image.shape[0]*test_image.shape[1]::,...] = np.column_stack((intensityColumn_Arr,segRC))#,
Y_train[(i-1)*test_image.shape[0]*test_image.shape[1]::,0] = np.squeeze(test_image_mask.reshape([1,test_image_mask.shape[0]*test_image_mask.shape[1]])).T
continue
X_train[(i-1)*test_image.shape[0]*test_image.shape[1]:(i)*test_image.shape[0]*test_image.shape[1],...] = np.column_stack((intensityColumn_Arr,segRC))#,
Y_train[(i-1)*test_image.shape[0]*test_image.shape[1]:(i)*test_image.shape[0]*test_image.shape[1],0] = np.squeeze(test_image_mask.reshape([1,test_image_mask.shape[0]*test_image_mask.shape[1]])).T
X_train_2 = X_train
y_train_2 = Y_train
clf_bayes = GaussianNB()
clf_bayes.fit(X_train_2,y_train_2)
paramsBayes = clf_bayes.get_params
# Logistic regression
clf_log = LogisticRegression()
clf_log.fit(X_train_2,y_train_2)
log = open('Classifiers/Segments/Log/LogClf_excluding_' + str(j) + '.pickle', 'wb')
pickle.dump(clf_log, log)
log.close()
bay = open('Classifiers/Segments/Bay/BayClf_excluding_' + str(j) + '.pickle', 'wb')
pickle.dump(clf_bayes, bay)
bay.close()
test_image = OD_section_Arr[j]
test_image_mask = OD_mask_Arr[j]
segRC = OD_section_RC[j]
clahePrep = np.multiply(np.copy(test_image),255)
clahePrep = clahePrep.astype(np.uint8)
highContrast = clahe.apply(clahePrep)
intensityColumn_Arr = np.squeeze(test_image.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
intensityColumn_Arr = (intensityColumn_Arr-np.average(intensityColumn_Arr)) / np.std(intensityColumn_Arr)
segRC = np.squeeze(segRC.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
#segRC = (segRC-np.average(segRC)) / np.std(segRC)
X_val = np.column_stack((intensityColumn_Arr,segRC))
predictsBayes = clf_bayes.predict(X_val)
predictsLog = clf_log.predict(X_val)
img_pred_Log = predictsLog.reshape([test_image.shape[0],test_image.shape[1]])
img_pred_Bayes = predictsBayes.reshape([test_image.shape[0],test_image.shape[1]])
#myShowImage(img_pred_Log,"Log")
#myShowImage(img_pred_Bayes,"Bayes")
#myShowImage(test_image,"Actual")
finalMaskPredicts.append(predictsBayes)
#print('ok')
finalMaskPredicts_Arr = np.array(finalMaskPredicts)
np.save("finalMaskPredicts_Bayes.npy",finalMaskPredicts_Arr)
loadFinalSegs = False
if loadFinalSegs:
foveaBBoxCoords = []
centroidCoord = []
ODmaskPredicts = []
elips = np.load('elips_arr.npy')
originalDimsBase = np.zeros(image_arr[0].shape)
OD_section_Arr = np.load('OD_section_Arr.npy')
finalMaskPredicts_Arr = np.load("finalMaskPredicts_Bayes.npy")
ODROILog_Arr = np.load('ODROILog_Arr.npy')
ODROIBay_Arr = np.load('ODROIBay_Arr.npy')
for i in range(0,40):
originalDims = np.copy(originalDimsBase)
test_image = OD_section_Arr[i]
maskPred = finalMaskPredicts_Arr[i].reshape([test_image.shape[0],test_image.shape[1]])
finalMask, centroidCoords, radius, colsCoord = getLargestAreaEcentroid(maskPred)
finalMaskImg = np.multiply(finalMask,255)
finalMaskImg[centroidCoords[0],centroidCoords[1]] = 255
try:
coords = ODROILog_Arr[i]
failTest = (coords[2])
except:
coords = ODROIBay_Arr[i]
failTest = (coords[2])
coordsReal =[centroidCoords[0] + coords[0],centroidCoords[1] + coords[1]]
colsCoordReal = [colsCoord[0] + coords[1],colsCoord[1] + coords[1]]
originalDims[coords[0]:coords[2],coords[1]:coords[3]] = finalMaskImg
#originalDims = originalDims or elips[i]
elipsResized = cv2.resize(elips[i], dsize=(originalDims.shape[1],originalDims.shape[0]), interpolation=cv2.INTER_CUBIC)
elipsResized = np.average(elipsResized,axis = 2) # 3 channels -> 1 channel
elipsResized[elipsResized>0.5] = 1
elipsResized[elipsResized<1] = 0
elipsResized = thin(elipsResized)
elipsIndexs = np.where(elipsResized != 0)
originalDims = originalDims.astype(np.uint8)
#originalDims[elipsIndexs] = 255
indexsOD_ELi = np.where(originalDims != 0)
#myShowImage(originalDims,str(i))
checkResults = np.copy(image_arr[i])
checkResults[indexsOD_ELi] = originalDims[indexsOD_ELi]
#checkResults[0::,np.min(elipsIndexs[1])] = 255 # left
#checkResults[0::,np.max(elipsIndexs[1])] = 255 # right
if abs(coordsReal[1]-np.min(elipsIndexs[1])) < abs(coordsReal[1]-np.max(elipsIndexs[1])):
#isleft -> walk right
#relevantColumn = coordsReal[1] + 30 # based on centroid
relevantColumn = colsCoordReal[1] - 10 # based on
columnROI_f = [coordsReal[1] + round(3*radius),coordsReal[1] + round(6*radius)]
else:
#isright -> walk left
#relevantColumn = coordsReal[1] - 30
relevantColumn = colsCoordReal[0] + 10
columnROI_f = [coordsReal[1] - round(6*radius),coordsReal[1] - round(3*radius)]
relevantRows = np.where(elipsResized[...,relevantColumn]!=0)
checkResults[relevantRows[0][0]:relevantRows[0][-1],columnROI_f[0]] = 0 # 1 - columnROI_f[0]
checkResults[relevantRows[0][0]:relevantRows[0][-1],columnROI_f[1]] = 0 # 3 - columnROI_f[1]
checkResults[relevantRows[0][0],columnROI_f[0]:columnROI_f[1]] = 0 # 0 - relevantRows[0][0]
checkResults[relevantRows[0][-1],columnROI_f[0]:columnROI_f[1]] = 0 # 2 - relevantRows[0][-1]
foveaBBoxCoords.append((relevantRows[0][0],columnROI_f[0],relevantRows[0][-1],columnROI_f[1]))
centroidCoord.append(coordsReal)
originalDims = np.divide(originalDims,255)
ODmaskPredicts.append(originalDims)
#myShowImage(originalDims,str(i))
#myShowImage(checkResults,str(i))
foveaBBoxCoords_Arr = np.array(foveaBBoxCoords)
centroidCoord_Arr = np.array(centroidCoord)
ODmaskPredicts_Arr = np.array(ODmaskPredicts)
np.save("bbox_fovea.npy",foveaBBoxCoords_Arr)
np.save("centroidCoord_Arr.npy",centroidCoord_Arr)
np.save("ODmaskPredicts_Arr.npy",ODmaskPredicts_Arr)
getFoveaGTCoords = True
if getFoveaGTCoords:
foveCoordsGT = []
tempCoords =[]
imgNo = 0
with open('Datasets/fovea_location.csv') as f:
reader = csv.reader(f)
next(reader)
for row in reader:
#print(row)
tempCoords.append(float(row[1]))
tempCoords.append(float(row[2]))
foveCoordsGT.append(tempCoords)
tempCoords =[]
imgNo += 1
if imgNo == 40:
break
getFoveaCoordsPred = False
'''for i in range(0,40):
myShowImage(image_arr[i])
myShowImage(image_arr_red_channels[i])
myShowImage(image_arr_green_channels[i])
myShowImage(vessels[i])
myShowImage(entropy_arr[i])'''
if getFoveaCoordsPred:
foveaBBoxCoords_Arr = np.load("bbox_fovea.npy")
foveaBBoxCoords_Arr = np.absolute(foveaBBoxCoords_Arr)
removeLen = 0
realCentroidCoords_Arr = []
clahe = cv2.createCLAHE(clipLimit=1, tileGridSize=(8, 8))
for i in range(0,40): # not the best way...
if foveaBBoxCoords_Arr[i][3] < foveaBBoxCoords_Arr[i][1]:
temp = foveaBBoxCoords_Arr[i][1]
foveaBBoxCoords_Arr[i][1] = foveaBBoxCoords_Arr[i][3]
foveaBBoxCoords_Arr[i][3] = temp
if foveaBBoxCoords_Arr[i][2] < foveaBBoxCoords_Arr[i][0]:
temp = foveaBBoxCoords_Arr[i][0]
foveaBBoxCoords_Arr[i][0] = foveaBBoxCoords_Arr[i][2]
foveaBBoxCoords_Arr[i][2] = temp
test_image = image_arr[i]
fovea_region = test_image[foveaBBoxCoords_Arr[i][0]:foveaBBoxCoords_Arr[i][2],foveaBBoxCoords_Arr[i][1]:foveaBBoxCoords_Arr[i][3]]
bboxShape = fovea_region.shape
removeLen += bboxShape[0]*bboxShape[1]
#print(removeLen)
for j in range(0,40):
removeLen = (foveaBBoxCoords_Arr[j][2]-foveaBBoxCoords_Arr[j][0]) * (foveaBBoxCoords_Arr[j][3]-foveaBBoxCoords_Arr[j][1])
X_train = np.zeros([3187816-removeLen,3]) # 3187816 = number of points in all fovea bboxs
Y_train = np.zeros([3187816-removeLen,1])
first = 0
for i in range(0,40):
if i == j:
continue
'''if foveaBBoxCoords_Arr[i][3] < foveaBBoxCoords_Arr[i][1]:
temp = foveaBBoxCoords_Arr[i][1]
foveaBBoxCoords_Arr[i][1] = foveaBBoxCoords_Arr[i][3]
foveaBBoxCoords_Arr[i][3] = temp
if foveaBBoxCoords_Arr[i][2] < foveaBBoxCoords_Arr[i][0]:
temp = foveaBBoxCoords_Arr[i][0]
foveaBBoxCoords_Arr[i][0] = foveaBBoxCoords_Arr[i][2]
foveaBBoxCoords_Arr[i][2] = temp'''
test_image = image_arr[i]
fovea_region = test_image[foveaBBoxCoords_Arr[i][0]:foveaBBoxCoords_Arr[i][2],foveaBBoxCoords_Arr[i][1]:foveaBBoxCoords_Arr[i][3]]
bboxShape = fovea_region.shape
last = bboxShape[0]*bboxShape[1] + first
foveaRegionGC = image_arr_green_channels[i][foveaBBoxCoords_Arr[i][0]:foveaBBoxCoords_Arr[i][2],foveaBBoxCoords_Arr[i][1]:foveaBBoxCoords_Arr[i][3]]
clahePrep = np.multiply(np.copy(foveaRegionGC),255)
clahePrep = clahePrep.astype(np.uint8)
highContrast = clahe.apply(clahePrep)
#mask
maskBig = np.zeros(test_image.shape)
coordsFoveaCenter = [round(foveCoordsGT[i][1]/4),round(foveCoordsGT[i][0]/4)]
maskBig[coordsFoveaCenter[0]-10:coordsFoveaCenter[0]+10,coordsFoveaCenter[1]-10:coordsFoveaCenter[1]+10] = 1
mask = maskBig[foveaBBoxCoords_Arr[i][0]:foveaBBoxCoords_Arr[i][2],foveaBBoxCoords_Arr[i][1]:foveaBBoxCoords_Arr[i][3]]
fovea_region = np.squeeze(fovea_region.reshape([1,bboxShape[0]*bboxShape[1]])).T
fovea_region = (fovea_region-np.average(fovea_region)) / np.std(fovea_region)
foveaRegionGC = np.squeeze(foveaRegionGC.reshape([1,bboxShape[0]*bboxShape[1]])).T
foveaRegionGC = (foveaRegionGC-np.average(foveaRegionGC)) / np.std(foveaRegionGC)
highContrast = np.squeeze(highContrast.reshape([1,bboxShape[0]*bboxShape[1]])).T
highContrast = (highContrast-np.average(highContrast)) / np.std(highContrast)
'''if (i-1)*bboxShape[0]*bboxShape[1] < 0 and (i)*bboxShape[0]*bboxShape[1] == 0:
X_train[(i-1)*bboxShape[0]*bboxShape[1]::,...] = np.column_stack((fovea_region,foveaRegionGC,highContrast))#,
Y_train[(i-1)*bboxShape[0]*bboxShape[1]::,0] = np.squeeze(mask.reshape([1,bboxShape[0]*bboxShape[1]])).T
continue'''
X_train[first:last,...] = np.column_stack((fovea_region,foveaRegionGC,highContrast))#,
Y_train[first:last,0] = np.squeeze(mask.reshape([1,bboxShape[0]*bboxShape[1]])).T
first = last
X_train_2 = X_train
y_train_2 = Y_train
clf_bayes = GaussianNB()
clf_bayes.fit(X_train_2,y_train_2)
paramsBayes = clf_bayes.get_params
# Logistic regression
clf_log = LogisticRegression()
clf_log.fit(X_train_2,y_train_2)
'''log = open('Classifiers/Segments/Log/LogClf_excluding_' + str(j) + '.pickle', 'wb')
pickle.dump(clf_log, log)
log.close()
bay = open('Classifiers/Segments/Bay/BayClf_excluding_' + str(j) + '.pickle', 'wb')
pickle.dump(clf_bayes, bay)
bay.close()'''
test_image = image_arr[j]
fovea_region = test_image[foveaBBoxCoords_Arr[j][0]:foveaBBoxCoords_Arr[j][2],foveaBBoxCoords_Arr[j][1]:foveaBBoxCoords_Arr[j][3]]
bboxShape = fovea_region.shape
foveaRegionGC = image_arr_green_channels[j][foveaBBoxCoords_Arr[j][0]:foveaBBoxCoords_Arr[j][2],foveaBBoxCoords_Arr[j][1]:foveaBBoxCoords_Arr[j][3]]
clahePrep = np.multiply(np.copy(foveaRegionGC),255)
clahePrep = clahePrep.astype(np.uint8)
highContrast = clahe.apply(clahePrep)
fovea_region = np.squeeze(fovea_region.reshape([1,bboxShape[0]*bboxShape[1]])).T
fovea_region = (fovea_region-np.average(fovea_region)) / np.std(fovea_region)
foveaRegionGC = np.squeeze(foveaRegionGC.reshape([1,bboxShape[0]*bboxShape[1]])).T
foveaRegionGC = (foveaRegionGC-np.average(foveaRegionGC)) / np.std(foveaRegionGC)
highContrast = np.squeeze(highContrast.reshape([1,bboxShape[0]*bboxShape[1]])).T
highContrast = (highContrast-np.average(highContrast)) / np.std(highContrast)
X_val = np.column_stack((fovea_region,foveaRegionGC,highContrast))
predictsBayes = clf_bayes.predict(X_val)
predictsLog = clf_log.predict(X_val)
img_pred_Log = predictsLog.reshape(bboxShape)
img_pred_Bayes = predictsBayes.reshape(bboxShape)
try:
finalMask, centroidCoords, radius, colsCoord = getLargestAreaEcentroid(img_pred_Bayes)
if centroidCoords.size == 0:
finalMask = np.zeros(img_pred_Bayes.shape)
finalMask[round(finalMask.shape[0]/2),round(finalMask.shape[1]/2)] = 1
centroidCoords = np.array([round(finalMask.shape[0]/2),round(finalMask.shape[1]/2)])
except:
finalMask = np.zeros(img_pred_Bayes.shape)
finalMask[round(finalMask.shape[0]/2),round(finalMask.shape[1]/2)] = 1
centroidCoords = np.array([round(finalMask.shape[0]/2),round(finalMask.shape[1]/2)])
maskEyes = np.copy(finalMask)
maskEyes = np.multiply(maskEyes,255)
maskEyes = maskEyes.astype(np.uint8)
#myShowImage(test_image[foveaBBoxCoords_Arr[j][0]:foveaBBoxCoords_Arr[j][2],foveaBBoxCoords_Arr[j][1]:foveaBBoxCoords_Arr[j][3]],"fovea")
#myShowImage(maskEyes,"Mask")
#myShowImage(img_pred_Bayes,"Bay")
realCentroidCoords = [centroidCoords[0] + foveaBBoxCoords_Arr[j][0],centroidCoords[1] + foveaBBoxCoords_Arr[j][1]]
realCentroidCoords_Arr.append(realCentroidCoords)
realCentroidCoords_Arr = np.array(realCentroidCoords_Arr)
np.save('fovea_centre_coords.npy',realCentroidCoords_Arr)
#centroidCoord_Arr = np.load("centroidCoord_Arr.npy")
#ODmaskPredicts_Arr = np.load("ODmaskPredicts_Arr.npy")
#for i in range(0,40):
showGraphsClass= False
if showGraphsClass:
import matplotlib.pyplot as plt
from sklearn import svm, datasets
def make_meshgrid(x, y, h=.02):
"""Create a mesh of points to plot in
Parameters
----------
x: data to base x-axis meshgrid on
y: data to base y-axis meshgrid on
h: stepsize for meshgrid, optional
Returns
-------
xx, yy : ndarray
"""
x_min, x_max = x.min() - 1, x.max() + 1
y_min, y_max = y.min() - 1, y.max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
return xx, yy
# MASKED: plot_contours function (lines 749-766)
## import some data to play with
#iris = datasets.load_iris()
## Take the first two features. We could avoid this by using a two-dim dataset
#X = iris.data[:, :2]
#y = iris.target
X = X_train_2
y = y_train_2
# Plot decision regions for the already-fitted classifiers (Bayes and
# logistic regression); the commented-out SVM variants are not used here.
models = (clf_bayes, clf_log) #, clf_svm, clf_svm_rbf)
# title for the plots
titles = ('Bayes',
'Logistic regression')
''' ,
'SVC with linear kernel',
'SVM with RBF kernel')'''
# Set-up 2x2 grid for plotting.
#fig, sub =
#plt.subplots_adjust(wspace=0.4, hspace=0.4)
X0, X1 = X[0::500, 0], X[0::500, 1]
xx, yy = make_meshgrid(X0, X1,h=0.005)
'''_,ax_all = plt.subplots(1,2)
ax = ax_all[1]
plot_contours(ax, clf_bayes, xx, yy,
cmap=plt.cm.coolwarm, alpha=0.8)
ax.scatter(X0, X1, c=y[0::500], cmap=plt.cm.coolwarm, s=20)
ax.set_xlim(X0.min(), X0.max())
ax.set_ylim(X1.min(), X1.max())
ax.set_xlabel('Distance')
ax.set_ylabel('Intensity')
ax.set_xticks(())
ax.set_yticks(())
ax.set_title("Bayes")
plt.show()'''
showPlots = False
if showPlots:
for clf, title in zip(models, titles):
_,ax_all = plt.subplots(1,2)
ax = ax_all[0]
plot_contours(ax, clf, xx, yy, proba=True, # proba=True plots predict_proba instead of hard class labels
cmap=plt.cm.coolwarm, alpha=0.8)
ax.scatter(X0, X1, c=y[0::500], cmap=plt.cm.coolwarm, s=20)
ax.set_xlim(X0.min(), X0.max())
ax.set_ylim(X1.min(), X1.max())
ax.set_xlabel('Distance')
ax.set_ylabel('Intensity')
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(title)
ax = ax_all[1]
plot_contours(ax, clf, xx, yy,
cmap=plt.cm.coolwarm, alpha=0.8)
ax.scatter(X0, X1, c=y[0::500], cmap=plt.cm.coolwarm, s=20)
ax.set_xlim(X0.min(), X0.max())
ax.set_ylim(X1.min(), X1.max())
ax.set_xlabel('Distance')
ax.set_ylabel('Intensity')
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(title)
plt.show()
print("Done")
|
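The block above repeats one leave-one-image-out pattern many times: for each held-out image j, per-pixel feature columns from the remaining 39 images are stacked, a GaussianNB and a LogisticRegression are fitted, and every pixel of the held-out image is predicted. A minimal, hedged sketch of that pattern follows; features[i] (an (N_i, F) matrix) and labels[i] (the flattened mask) are illustrative names, not variables from the script.

import numpy as np
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression

def loo_pixel_predictions(features, labels, j):
    # Stack the per-pixel features and mask labels of every image except j.
    X_train = np.vstack([f for i, f in enumerate(features) if i != j])
    y_train = np.concatenate([y for i, y in enumerate(labels) if i != j])
    # Fit the two classifiers used throughout the script.
    clf_bayes = GaussianNB().fit(X_train, y_train)
    clf_log = LogisticRegression(max_iter=1000).fit(X_train, y_train)
    # Predict every pixel of the held-out image j.
    return clf_bayes.predict(features[j]), clf_log.predict(features[j])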
def plot_contours(ax, clf, xx, yy, proba=False, **params):
"""Plot the decision boundaries for a classifier.
Parameters
----------
ax: matplotlib axes object
clf: a classifier
xx: meshgrid ndarray
yy: meshgrid ndarray
params: dictionary of params to pass to contourf, optional
"""
if proba:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:,-1]
else:
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
out = ax.contourf(xx, yy, Z,20, **params)
return out
| 749 | 766 |
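A possible usage of the make_meshgrid and plot_contours helpers above, shown on a small synthetic two-feature dataset rather than the retinal pixel features; the data and classifier below are purely illustrative, and the two helpers are assumed to be in scope.

import numpy as np
import matplotlib.pyplot as plt
from sklearn.naive_bayes import GaussianNB

rng = np.random.default_rng(0)
X = np.vstack([rng.normal(0.0, 1.0, (100, 2)), rng.normal(3.0, 1.0, (100, 2))])
y = np.array([0] * 100 + [1] * 100)
clf = GaussianNB().fit(X, y)

fig, ax = plt.subplots()
xx, yy = make_meshgrid(X[:, 0], X[:, 1], h=0.05)  # helper defined above
plot_contours(ax, clf, xx, yy, proba=True, cmap=plt.cm.coolwarm, alpha=0.8)
ax.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.coolwarm, s=20)
ax.set_xlabel('feature 1')
ax.set_ylabel('feature 2')
plt.show()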
import cv2.cv2 as cv2
import skimage.io as io
from skimage.transform import downscale_local_mean
import numpy as np
from model import *
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from images_to_arr import *
import pickle
import csv
def removeBackground(img_in):
Img_backless = np.copy(img_in)
Img_backless = np.subtract(np.multiply(Img_backless,1.11),0.11)
Img_backless[Img_backless < 0] = 0
return Img_backless
def newBBcoords(img_pred_Log,test_image):
# returns coordinates of the bounding box for the region with the largest area
kernel_ones = np.ones([3,3],np.uint8)
closing_Log = cv2.morphologyEx(img_pred_Log, cv2.MORPH_CLOSE, kernel_ones)
labelsLog, numLog = label(closing_Log, neighbors=8, background = 0, return_num = True)
regionsLog = regionprops(labelsLog)
areasLog = [region['area'] for region in regionsLog]
areasLogArr = np.array(areasLog)
maxIndex = np.argmax(areasLogArr)
value = labelsLog[regionsLog[maxIndex]['coords'][0][0],regionsLog[maxIndex]['coords'][0][1]]
labelsLog[labelsLog != value] = 0
labelsLog[labelsLog == value] = 1
labelsImg = np.multiply(np.array(labelsLog, np.uint8),255)
#myShowImage(labelsImg)
sizeBoxX = regionsLog[maxIndex]['bbox'][3]-regionsLog[maxIndex]['bbox'][1]
sizeBoxY = regionsLog[maxIndex]['bbox'][2]-regionsLog[maxIndex]['bbox'][0]
coordsBbox = list(regionsLog[maxIndex]['bbox'])
if sizeBoxX <= 0.5 * img_pred_Log.shape[1]:
newSizeBoxX = 0.3 / (sizeBoxX / img_pred_Log.shape[1])
coordsBbox[1] = coordsBbox[1] - sizeBoxX*(0.5*(newSizeBoxX-1))
coordsBbox[3] = coordsBbox[3] + sizeBoxX*(0.5*(newSizeBoxX-1))
if sizeBoxY <= 0.5 * img_pred_Log.shape[0]:
newSizeBoxY = 0.5 / (sizeBoxY / img_pred_Log.shape[0])
coordsBbox[0] = coordsBbox[0] - sizeBoxY*(0.5*(newSizeBoxY-1))
coordsBbox[2] = coordsBbox[2] + sizeBoxY*(0.5*(newSizeBoxY-1))
if coordsBbox[0] < 0:
coordsBbox[0] = 0
if coordsBbox[1] < 0:
coordsBbox[1] = 0
if coordsBbox[2] > test_image.shape[0]:
coordsBbox[2] = test_image.shape[0] - 1
if coordsBbox[3] > test_image.shape[1]:
coordsBbox[3] = test_image.shape[1] - 1
coordsBboxInt = [round(x) for x in coordsBbox]
return coordsBboxInt
def getLargestAreaEcentroid(img_pred_Log):
# returns mask with the regions with the largest area, coords of centroid and radius
kernel_ones = np.ones([3,3],np.uint8)
closing_Log = cv2.morphologyEx(img_pred_Log, cv2.MORPH_CLOSE, kernel_ones)
labelsLog, numLog = label(closing_Log, neighbors=8, background = 0, return_num = True)
regionsLog = regionprops(labelsLog)
areasLog = [region['area'] for region in regionsLog]
areasLogArr = np.array(areasLog)
maxIndex = np.argmax(areasLogArr)
value = labelsLog[regionsLog[maxIndex]['coords'][0][0],regionsLog[maxIndex]['coords'][0][1]]
labelsLog[labelsLog != value] = 0
labelsLog[labelsLog == value] = 1
centreCoords = np.round(regionsLog[maxIndex]['centroid'])
centreCoords = centreCoords.astype(np.uint)
radius = (regionsLog[maxIndex]['major_axis_length'] + regionsLog[maxIndex]['minor_axis_length']) / 4
colsCoord = [regionsLog[maxIndex]['bbox'][1],regionsLog[maxIndex]['bbox'][3]]
labelsArr = np.array(labelsLog)
return labelsArr, centreCoords, radius, colsCoord
image_arr = np.load('image_arr.npy')
mask_arr = np.load('mask_arr.npy')
image_arr_red_channels = np.load('image_arr_red_channels.npy')
image_arr_green_channels = np.load('image_arr_green_channels.npy')
image_arr_blue_channels = np.load('image_arr_blue_channels.npy')
entropy = np.load('entropy_arr.npy')
elips = np.load('elips_arr.npy')
vessels = np.load('vessels_arr.npy')
test_image = np.zeros(image_arr[0].shape)
test_image_mask = np.zeros(mask_arr[0].shape)
test_img_RC = np.zeros(image_arr[0].shape)
test_img_GC = np.zeros(image_arr[0].shape)
test_img_BC = np.zeros(image_arr[0].shape)
entropy_arr = np.zeros(image_arr[0].shape)
elips_arr = np.zeros(image_arr[0].shape)
ODROILog = []
ODROIBay = []
getClassifiers = False
if getClassifiers:
X_train = np.zeros([image_arr[0].shape[0]*image_arr[0].shape[1]*40,4])
Y_train = np.zeros([image_arr[0].shape[0]*image_arr[0].shape[1]*40,1])
for j in range(0,40):
for i in range(0,40): # Get train data
if i == j:
continue
test_image = image_arr[i]
test_image_mask = mask_arr[i]
labels, num = label(test_image_mask, neighbors=8, background = 0, return_num = True)
regions = regionprops(labels)
centreCoords = np.round(regions[0]['centroid'])
centreCoords = centreCoords.astype(np.uint)
centreMask = np.zeros(test_image_mask.shape)
centreMask[centreCoords[0],centreCoords[1]] = 1
#Change here!
#test_image_mask = centreMask
test_image_RC = image_arr_red_channels[i]
test_image_GC = image_arr_green_channels[i]
test_image_BC = image_arr_blue_channels[i]
entropy_arr = entropy[i]
elips_arr = elips[i]
#test_image_RC = removeBackground(test_image_RC)
#test_image = removeBackground(test_image)
imageIndxs = np.where(test_image != 0)
intensityColumn_Arr = np.squeeze(test_image.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
intensityColumn_Arr = (intensityColumn_Arr-np.average(intensityColumn_Arr)) / np.std(intensityColumn_Arr)
redChannel_Arr = np.squeeze(test_image_RC.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
redChannel_Arr = (redChannel_Arr-np.average(redChannel_Arr)) / np.std(redChannel_Arr)
entropy_arr = np.squeeze(entropy_arr.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
#entropy_arr = (entropy_arr-np.average(entropy_arr)) / np.std(entropy_arr)
# Distance Array
indices_Arr = np.indices((test_image.shape[0],test_image.shape[1])).transpose((1,2,0))
centreCoords = np.array([test_image.shape[0]/2,test_image.shape[1]/2])
distance_Arr = np.sqrt(np.add(np.power(indices_Arr[...,0]-centreCoords[0],2),np.power(indices_Arr[...,1]-centreCoords[1],2)))
normDistance_Arr = distance_Arr / np.max(distance_Arr)
normDistanceColumn_Arr = np.squeeze(normDistance_Arr.reshape([1,normDistance_Arr.shape[0]*normDistance_Arr.shape[1]])).T
X_train[i*image_arr[0].shape[0]*image_arr[0].shape[1]:(i+1)*image_arr[0].shape[0]*image_arr[0].shape[1],...] = np.column_stack((redChannel_Arr,entropy_arr,normDistanceColumn_Arr, intensityColumn_Arr))#,
Y_train[i*image_arr[0].shape[0]*image_arr[0].shape[1]:(i+1)*image_arr[0].shape[0]*image_arr[0].shape[1],0] = np.squeeze(test_image_mask.reshape([1,test_image_mask.shape[0]*test_image_mask.shape[1]])).T
X_train_2 = X_train
y_train_2 = Y_train
clf_bayes = GaussianNB()
clf_bayes.fit(X_train_2,y_train_2)
paramsBayes = clf_bayes.get_params
# Logistic regression
clf_log = LogisticRegression()
clf_log.fit(X_train_2,y_train_2)
log = open('Classifiers/Log/LogClf_excluding_' + str(j) + '.pickle', 'wb')
pickle.dump(clf_log, log)
log.close()
bay = open('Classifiers/Bay/BayClf_excluding_' + str(j) + '.pickle', 'wb')
pickle.dump(clf_bayes, bay)
bay.close()
'''
f = open('my_classifier.pickle', 'rb')
classifier = pickle.load(f)
f.close()
'''
test_image2 = np.zeros(image_arr[0].shape)
test_image_mask2 = np.zeros(mask_arr[0].shape)
test_img_RC2 = np.zeros(image_arr[0].shape)
# test_img_GC2 = np.zeros(image_arr[0].shape)
test_image2 = image_arr[j]
test_image_mask2 = mask_arr[j]
test_image_RC2 = image_arr_red_channels[j]
test_image_GC2 = image_arr_green_channels[j]
test_image_BC2 = image_arr_blue_channels[j]
entropy_arr2 = entropy[j]
intensityColumn_Arr2 = np.squeeze(test_image2.reshape([1,test_image2.shape[0]*test_image2.shape[1]])).T
intensityColumn_Arr2 = (intensityColumn_Arr2-np.average(intensityColumn_Arr2)) / np.std(intensityColumn_Arr2)
redChannel_Arr2 = np.squeeze(test_image_RC2.reshape([1,test_image2.shape[0]*test_image2.shape[1]])).T
redChannel_Arr2 = ( redChannel_Arr2 - np.average(redChannel_Arr2) ) / np.std(redChannel_Arr2)
entropy_arr = np.squeeze(entropy_arr2.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
X_val = np.column_stack((redChannel_Arr2,entropy_arr,normDistanceColumn_Arr,intensityColumn_Arr2))#,,greenChannel_Arr2))
Y_val = np.squeeze(test_image_mask2.reshape([1,test_image_mask2.shape[0]*test_image_mask2.shape[1]])).T
# predicts
predictsBayes = clf_bayes.predict(X_val)
predictsLog = clf_log.predict(X_val)
img_pred_Log = predictsLog.reshape([test_image.shape[0],test_image.shape[1]])
img_pred_Bayes = predictsBayes.reshape([test_image.shape[0],test_image.shape[1]])
# Y_train_reshaped = Y_train.reshape([test_image.shape[0],test_image.shape[1]])
#myShowImage(img_pred_Log,"img_pred_Log_" + str(j))
#myShowImage(img_pred_Bayes,"img_pred_Bayes_" + str(j))
try:
coordsBBLog = newBBcoords(img_pred_Log,test_image)
except:
coordsBBLog = []
try:
coordsBBBay = newBBcoords(img_pred_Bayes,test_image)
except:
coordsBBBay = []
ODROILog.append(coordsBBLog)
ODROIBay.append(coordsBBBay)
ODROILog_Arr = np.array(ODROILog)
ODROIBay_Arr = np.array(ODROIBay)
np.save('ODROILog_Arr.npy',ODROILog_Arr)
np.save('ODROIBay_Arr.npy',ODROIBay_Arr)
prepareSegments = False
if prepareSegments:
ODROILog_Arr = np.load('ODROILog_Arr.npy')
ODROIBay_Arr = np.load('ODROIBay_Arr.npy')
OD_section = []
OD_mask = []
OD_section_RC = []
lenX_Arr = 0
for i in range(0,40):
try:
coords = ODROILog_Arr[i]
#myShowImage(image_arr[i][coords[0]:coords[2],coords[1]:coords[3]],"LOG" +str(i))
segMask = np.array(mask_arr[i][coords[0]:coords[2],coords[1]:coords[3]])
segRC = np.array(image_arr_red_channels[i][coords[0]:coords[2],coords[1]:coords[3]])
imgSegment = np.array(image_arr[i][coords[0]:coords[2],coords[1]:coords[3]])
vesslesSeg = np.array(vessels[i][coords[0]:coords[2],coords[1]:coords[3]])
kernel_ones = np.ones([3,3],np.uint8)
vesslesSeg = cv2.morphologyEx(vesslesSeg, cv2.MORPH_DILATE, kernel_ones)
indxsVesl = np.where(vesslesSeg != 0)
medianFiltered = median(imgSegment,disk(25))
maxFiltered = maximum_filter(imgSegment, size=15)
smoothVessels = np.copy(imgSegment)
smoothVessels[indxsVesl[0],indxsVesl[1]] = np.multiply(maxFiltered[indxsVesl[0],indxsVesl[1]],0.97)
#smoothDisk = mean(smoothVessels, disk(5))
OD_section.append(smoothVessels)
OD_mask.append(segMask)
OD_section_RC.append(segRC)
lenX_Arr = lenX_Arr + (imgSegment.shape[0]*imgSegment.shape[1])
#coords = ODROIBay_Arr[i]
#myShowImage(image_arr[i][coords[0]:coords[2],coords[1]:coords[3]],"BAY" + str(i))
except:
coords = ODROIBay_Arr[i]
segMask = np.array(mask_arr[i][coords[0]:coords[2],coords[1]:coords[3]])
segRC = np.array(image_arr_red_channels[i][coords[0]:coords[2],coords[1]:coords[3]])
imgSegment = np.array(image_arr[i][coords[0]:coords[2],coords[1]:coords[3]])
vesslesSeg = np.array(vessels[i][coords[0]:coords[2],coords[1]:coords[3]])
kernel_ones = np.ones([3,3],np.uint8)
vesslesSeg = cv2.morphologyEx(vesslesSeg, cv2.MORPH_DILATE, kernel_ones)
indxsVesl = np.where(vesslesSeg != 0)
#medianFiltered = median(imgSegment,disk(25))
maxFiltered = maximum_filter(imgSegment, size=15)
smoothVessels = np.copy(imgSegment)
smoothVessels[indxsVesl[0],indxsVesl[1]] = np.multiply(maxFiltered[indxsVesl[0],indxsVesl[1]],0.97)
#myShowImage(image_arr[i][coords[0]:coords[2],coords[1]:coords[3]],"EXCEPT" + str(i))
OD_section.append(smoothVessels)
OD_mask.append(segMask)
OD_section_RC.append(segRC)
#print('except')
lenX_Arr = lenX_Arr + (imgSegment.shape[0]*imgSegment.shape[1])
#myShowImage(smoothVessels)
OD_section_Arr = np.array(OD_section)
OD_mask_Arr = np.array(OD_mask)
OD_section_RC = np.array(OD_section_RC)
np.save('OD_section_Arr.npy',OD_section_Arr)
np.save('OD_mask_Arr.npy',OD_mask_Arr)
np.save('OD_section_RC.npy',OD_section_RC)
print(lenX_Arr) # len = 4577126
finalSegmentation = False
finalMaskPredicts = []
if finalSegmentation:
OD_section_Arr = np.load('OD_section_Arr.npy')
OD_mask_Arr = np.load('OD_mask_Arr.npy')
OD_section_RC = np.load('OD_section_RC.npy')
clahe = cv2.createCLAHE(clipLimit=1, tileGridSize=(8, 8))
for j in range(0,40):
removeLen = OD_section_Arr[j].shape[0] * OD_section_Arr[j].shape[1]
X_train = np.zeros([4577126-removeLen,2])
Y_train = np.zeros([4577126-removeLen,1])
for i in range(0,40):
if i == j:
continue
test_image = OD_section_Arr[i]
test_image_mask = OD_mask_Arr[i]
segRC = OD_section_RC[i]
clahePrep = np.multiply(np.copy(test_image),255)
clahePrep = clahePrep.astype(np.uint8)
highContrast = clahe.apply(clahePrep)
intensityColumn_Arr = np.squeeze(test_image.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
intensityColumn_Arr = (intensityColumn_Arr-np.average(intensityColumn_Arr)) / np.std(intensityColumn_Arr)
segRC = np.squeeze(segRC.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
#segRC = (segRC-np.average(segRC)) / np.std(segRC)
if (i-1)*test_image.shape[0]*test_image.shape[1] < 0 and (i)*test_image.shape[0]*test_image.shape[1] == 0:
X_train[(i-1)*test_image.shape[0]*test_image.shape[1]::,...] = np.column_stack((intensityColumn_Arr,segRC))#,
Y_train[(i-1)*test_image.shape[0]*test_image.shape[1]::,0] = np.squeeze(test_image_mask.reshape([1,test_image_mask.shape[0]*test_image_mask.shape[1]])).T
continue
X_train[(i-1)*test_image.shape[0]*test_image.shape[1]:(i)*test_image.shape[0]*test_image.shape[1],...] = np.column_stack((intensityColumn_Arr,segRC))#,
Y_train[(i-1)*test_image.shape[0]*test_image.shape[1]:(i)*test_image.shape[0]*test_image.shape[1],0] = np.squeeze(test_image_mask.reshape([1,test_image_mask.shape[0]*test_image_mask.shape[1]])).T
X_train_2 = X_train
y_train_2 = Y_train
clf_bayes = GaussianNB()
clf_bayes.fit(X_train_2,y_train_2)
paramsBayes = clf_bayes.get_params
# Logistic regression
clf_log = LogisticRegression()
clf_log.fit(X_train_2,y_train_2)
log = open('Classifiers/Segments/Log/LogClf_excluding_' + str(j) + '.pickle', 'wb')
pickle.dump(clf_log, log)
log.close()
bay = open('Classifiers/Segments/Bay/BayClf_excluding_' + str(j) + '.pickle', 'wb')
pickle.dump(clf_bayes, bay)
bay.close()
test_image = OD_section_Arr[j]
test_image_mask = OD_mask_Arr[j]
segRC = OD_section_RC[j]
clahePrep = np.multiply(np.copy(test_image),255)
clahePrep = clahePrep.astype(np.uint8)
highContrast = clahe.apply(clahePrep)
intensityColumn_Arr = np.squeeze(test_image.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
intensityColumn_Arr = (intensityColumn_Arr-np.average(intensityColumn_Arr)) / np.std(intensityColumn_Arr)
segRC = np.squeeze(segRC.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
#segRC = (segRC-np.average(segRC)) / np.std(segRC)
X_val = np.column_stack((intensityColumn_Arr,segRC))
predictsBayes = clf_bayes.predict(X_val)
predictsLog = clf_log.predict(X_val)
img_pred_Log = predictsLog.reshape([test_image.shape[0],test_image.shape[1]])
img_pred_Bayes = predictsBayes.reshape([test_image.shape[0],test_image.shape[1]])
#myShowImage(img_pred_Log,"Log")
#myShowImage(img_pred_Bayes,"Bayes")
#myShowImage(test_image,"Actual")
finalMaskPredicts.append(predictsBayes)
#print('ok')
finalMaskPredicts_Arr = np.array(finalMaskPredicts)
np.save("finalMaskPredicts_Bayes.npy",finalMaskPredicts_Arr)
loadFinalSegs = False
if loadFinalSegs:
foveaBBoxCoords = []
centroidCoord = []
ODmaskPredicts = []
elips = np.load('elips_arr.npy')
originalDimsBase = np.zeros(image_arr[0].shape)
OD_section_Arr = np.load('OD_section_Arr.npy')
finalMaskPredicts_Arr = np.load("finalMaskPredicts_Bayes.npy")
ODROILog_Arr = np.load('ODROILog_Arr.npy')
ODROIBay_Arr = np.load('ODROIBay_Arr.npy')
for i in range(0,40):
originalDims = np.copy(originalDimsBase)
test_image = OD_section_Arr[i]
maskPred = finalMaskPredicts_Arr[i].reshape([test_image.shape[0],test_image.shape[1]])
finalMask, centroidCoords, radius, colsCoord = getLargestAreaEcentroid(maskPred)
finalMaskImg = np.multiply(finalMask,255)
finalMaskImg[centroidCoords[0],centroidCoords[1]] = 255
try:
coords = ODROILog_Arr[i]
failTest = (coords[2])
except:
coords = ODROIBay_Arr[i]
failTest = (coords[2])
coordsReal =[centroidCoords[0] + coords[0],centroidCoords[1] + coords[1]]
colsCoordReal = [colsCoord[0] + coords[1],colsCoord[1] + coords[1]]
originalDims[coords[0]:coords[2],coords[1]:coords[3]] = finalMaskImg
#originalDims = originalDims or elips[i]
elipsResized = cv2.resize(elips[i], dsize=(originalDims.shape[1],originalDims.shape[0]), interpolation=cv2.INTER_CUBIC)
elipsResized = np.average(elipsResized,axis = 2) # 3 channels -> 1 channel
elipsResized[elipsResized>0.5] = 1
elipsResized[elipsResized<1] = 0
elipsResized = thin(elipsResized)
elipsIndexs = np.where(elipsResized != 0)
originalDims = originalDims.astype(np.uint8)
#originalDims[elipsIndexs] = 255
indexsOD_ELi = np.where(originalDims != 0)
#myShowImage(originalDims,str(i))
checkResults = np.copy(image_arr[i])
checkResults[indexsOD_ELi] = originalDims[indexsOD_ELi]
#checkResults[0::,np.min(elipsIndexs[1])] = 255 # left
#checkResults[0::,np.max(elipsIndexs[1])] = 255 # right
if abs(coordsReal[1]-np.min(elipsIndexs[1])) < abs(coordsReal[1]-np.max(elipsIndexs[1])):
#isleft -> walk right
#relevantColumn = coordsReal[1] + 30 # based on centroid
relevantColumn = colsCoordReal[1] - 10 # based on
columnROI_f = [coordsReal[1] + round(3*radius),coordsReal[1] + round(6*radius)]
else:
#isright -> walk left
#relevantColumn = coordsReal[1] - 30
relevantColumn = colsCoordReal[0] + 10
columnROI_f = [coordsReal[1] - round(6*radius),coordsReal[1] - round(3*radius)]
relevantRows = np.where(elipsResized[...,relevantColumn]!=0)
checkResults[relevantRows[0][0]:relevantRows[0][-1],columnROI_f[0]] = 0 # 1 - columnROI_f[0]
checkResults[relevantRows[0][0]:relevantRows[0][-1],columnROI_f[1]] = 0 # 3 - columnROI_f[1]
checkResults[relevantRows[0][0],columnROI_f[0]:columnROI_f[1]] = 0 # 0 - relevantRows[0][0]
checkResults[relevantRows[0][-1],columnROI_f[0]:columnROI_f[1]] = 0 # 2 - relevantRows[0][-1]
foveaBBoxCoords.append((relevantRows[0][0],columnROI_f[0],relevantRows[0][-1],columnROI_f[1]))
centroidCoord.append(coordsReal)
originalDims = np.divide(originalDims,255)
ODmaskPredicts.append(originalDims)
#myShowImage(originalDims,str(i))
#myShowImage(checkResults,str(i))
foveaBBoxCoords_Arr = np.array(foveaBBoxCoords)
centroidCoord_Arr = np.array(centroidCoord)
ODmaskPredicts_Arr = np.array(ODmaskPredicts)
np.save("bbox_fovea.npy",foveaBBoxCoords_Arr)
np.save("centroidCoord_Arr.npy",centroidCoord_Arr)
np.save("ODmaskPredicts_Arr.npy",ODmaskPredicts_Arr)
getFoveaGTCoords = True
if getFoveaGTCoords:
foveCoordsGT = []
tempCoords =[]
imgNo = 0
with open('Datasets/fovea_location.csv') as f:
reader = csv.reader(f)
next(reader)
for row in reader:
#print(row)
tempCoords.append(float(row[1]))
tempCoords.append(float(row[2]))
foveCoordsGT.append(tempCoords)
tempCoords =[]
imgNo += 1
if imgNo == 40:
break
getFoveaCoordsPred = False
'''for i in range(0,40):
myShowImage(image_arr[i])
myShowImage(image_arr_red_channels[i])
myShowImage(image_arr_green_channels[i])
myShowImage(vessels[i])
myShowImage(entropy_arr[i])'''
if getFoveaCoordsPred:
foveaBBoxCoords_Arr = np.load("bbox_fovea.npy")
foveaBBoxCoords_Arr = np.absolute(foveaBBoxCoords_Arr)
removeLen = 0
realCentroidCoords_Arr = []
clahe = cv2.createCLAHE(clipLimit=1, tileGridSize=(8, 8))
for i in range(0,40): # not the best way...
if foveaBBoxCoords_Arr[i][3] < foveaBBoxCoords_Arr[i][1]:
temp = foveaBBoxCoords_Arr[i][1]
foveaBBoxCoords_Arr[i][1] = foveaBBoxCoords_Arr[i][3]
foveaBBoxCoords_Arr[i][3] = temp
if foveaBBoxCoords_Arr[i][2] < foveaBBoxCoords_Arr[i][0]:
temp = foveaBBoxCoords_Arr[i][0]
foveaBBoxCoords_Arr[i][0] = foveaBBoxCoords_Arr[i][2]
foveaBBoxCoords_Arr[i][2] = temp
test_image = image_arr[i]
fovea_region = test_image[foveaBBoxCoords_Arr[i][0]:foveaBBoxCoords_Arr[i][2],foveaBBoxCoords_Arr[i][1]:foveaBBoxCoords_Arr[i][3]]
bboxShape = fovea_region.shape
removeLen += bboxShape[0]*bboxShape[1]
#print(removeLen)
for j in range(0,40):
removeLen = (foveaBBoxCoords_Arr[j][2]-foveaBBoxCoords_Arr[j][0]) * (foveaBBoxCoords_Arr[j][3]-foveaBBoxCoords_Arr[j][1])
X_train = np.zeros([3187816-removeLen,3]) # 3187816 = number of points in all fovea bboxs
Y_train = np.zeros([3187816-removeLen,1])
first = 0
for i in range(0,40):
if i == j:
continue
'''if foveaBBoxCoords_Arr[i][3] < foveaBBoxCoords_Arr[i][1]:
temp = foveaBBoxCoords_Arr[i][1]
foveaBBoxCoords_Arr[i][1] = foveaBBoxCoords_Arr[i][3]
foveaBBoxCoords_Arr[i][3] = temp
if foveaBBoxCoords_Arr[i][2] < foveaBBoxCoords_Arr[i][0]:
temp = foveaBBoxCoords_Arr[i][0]
foveaBBoxCoords_Arr[i][0] = foveaBBoxCoords_Arr[i][2]
foveaBBoxCoords_Arr[i][2] = temp'''
test_image = image_arr[i]
fovea_region = test_image[foveaBBoxCoords_Arr[i][0]:foveaBBoxCoords_Arr[i][2],foveaBBoxCoords_Arr[i][1]:foveaBBoxCoords_Arr[i][3]]
bboxShape = fovea_region.shape
last = bboxShape[0]*bboxShape[1] + first
foveaRegionGC = image_arr_green_channels[i][foveaBBoxCoords_Arr[i][0]:foveaBBoxCoords_Arr[i][2],foveaBBoxCoords_Arr[i][1]:foveaBBoxCoords_Arr[i][3]]
clahePrep = np.multiply(np.copy(foveaRegionGC),255)
clahePrep = clahePrep.astype(np.uint8)
highContrast = clahe.apply(clahePrep)
#mask
maskBig = np.zeros(test_image.shape)
coordsFoveaCenter = [round(foveCoordsGT[i][1]/4),round(foveCoordsGT[i][0]/4)]
maskBig[coordsFoveaCenter[0]-10:coordsFoveaCenter[0]+10,coordsFoveaCenter[1]-10:coordsFoveaCenter[1]+10] = 1
mask = maskBig[foveaBBoxCoords_Arr[i][0]:foveaBBoxCoords_Arr[i][2],foveaBBoxCoords_Arr[i][1]:foveaBBoxCoords_Arr[i][3]]
fovea_region = np.squeeze(fovea_region.reshape([1,bboxShape[0]*bboxShape[1]])).T
fovea_region = (fovea_region-np.average(fovea_region)) / np.std(fovea_region)
foveaRegionGC = np.squeeze(foveaRegionGC.reshape([1,bboxShape[0]*bboxShape[1]])).T
foveaRegionGC = (foveaRegionGC-np.average(foveaRegionGC)) / np.std(foveaRegionGC)
highContrast = np.squeeze(highContrast.reshape([1,bboxShape[0]*bboxShape[1]])).T
highContrast = (highContrast-np.average(highContrast)) / np.std(highContrast)
'''if (i-1)*bboxShape[0]*bboxShape[1] < 0 and (i)*bboxShape[0]*bboxShape[1] == 0:
X_train[(i-1)*bboxShape[0]*bboxShape[1]::,...] = np.column_stack((fovea_region,foveaRegionGC,highContrast))#,
Y_train[(i-1)*bboxShape[0]*bboxShape[1]::,0] = np.squeeze(mask.reshape([1,bboxShape[0]*bboxShape[1]])).T
continue'''
X_train[first:last,...] = np.column_stack((fovea_region,foveaRegionGC,highContrast))#,
Y_train[first:last,0] = np.squeeze(mask.reshape([1,bboxShape[0]*bboxShape[1]])).T
first = last
X_train_2 = X_train
y_train_2 = Y_train
clf_bayes = GaussianNB()
clf_bayes.fit(X_train_2,y_train_2)
paramsBayes = clf_bayes.get_params
# Logistic regression
clf_log = LogisticRegression()
clf_log.fit(X_train_2,y_train_2)
'''log = open('Classifiers/Segments/Log/LogClf_excluding_' + str(j) + '.pickle', 'wb')
pickle.dump(clf_log, log)
log.close()
bay = open('Classifiers/Segments/Bay/BayClf_excluding_' + str(j) + '.pickle', 'wb')
pickle.dump(clf_bayes, bay)
bay.close()'''
test_image = image_arr[j]
fovea_region = test_image[foveaBBoxCoords_Arr[j][0]:foveaBBoxCoords_Arr[j][2],foveaBBoxCoords_Arr[j][1]:foveaBBoxCoords_Arr[j][3]]
bboxShape = fovea_region.shape
foveaRegionGC = image_arr_green_channels[j][foveaBBoxCoords_Arr[j][0]:foveaBBoxCoords_Arr[j][2],foveaBBoxCoords_Arr[j][1]:foveaBBoxCoords_Arr[j][3]]
clahePrep = np.multiply(np.copy(foveaRegionGC),255)
clahePrep = clahePrep.astype(np.uint8)
highContrast = clahe.apply(clahePrep)
fovea_region = np.squeeze(fovea_region.reshape([1,bboxShape[0]*bboxShape[1]])).T
fovea_region = (fovea_region-np.average(fovea_region)) / np.std(fovea_region)
foveaRegionGC = np.squeeze(foveaRegionGC.reshape([1,bboxShape[0]*bboxShape[1]])).T
foveaRegionGC = (foveaRegionGC-np.average(foveaRegionGC)) / np.std(foveaRegionGC)
highContrast = np.squeeze(highContrast.reshape([1,bboxShape[0]*bboxShape[1]])).T
highContrast = (highContrast-np.average(highContrast)) / np.std(highContrast)
X_val = np.column_stack((fovea_region,foveaRegionGC,highContrast))
predictsBayes = clf_bayes.predict(X_val)
predictsLog = clf_log.predict(X_val)
img_pred_Log = predictsLog.reshape(bboxShape)
img_pred_Bayes = predictsBayes.reshape(bboxShape)
try:
finalMask, centroidCoords, radius, colsCoord = getLargestAreaEcentroid(img_pred_Bayes)
if centroidCoords.size == 0:
finalMask = np.zeros(img_pred_Bayes.shape)
finalMask[round(finalMask.shape[0]/2),round(finalMask.shape[1]/2)] = 1
centroidCoords = np.array([round(finalMask.shape[0]/2),round(finalMask.shape[1]/2)])
except:
finalMask = np.zeros(img_pred_Bayes.shape)
finalMask[round(finalMask.shape[0]/2),round(finalMask.shape[1]/2)] = 1
centroidCoords = np.array([round(finalMask.shape[0]/2),round(finalMask.shape[1]/2)])
maskEyes = np.copy(finalMask)
maskEyes = np.multiply(maskEyes,255)
maskEyes = maskEyes.astype(np.uint8)
#myShowImage(test_image[foveaBBoxCoords_Arr[j][0]:foveaBBoxCoords_Arr[j][2],foveaBBoxCoords_Arr[j][1]:foveaBBoxCoords_Arr[j][3]],"fovea")
#myShowImage(maskEyes,"Mask")
#myShowImage(img_pred_Bayes,"Bay")
realCentroidCoords = [centroidCoords[0] + foveaBBoxCoords_Arr[j][0],centroidCoords[1] + foveaBBoxCoords_Arr[j][1]]
realCentroidCoords_Arr.append(realCentroidCoords)
realCentroidCoords_Arr = np.array(realCentroidCoords_Arr)
np.save('fovea_centre_coords.npy',realCentroidCoords_Arr)
#centroidCoord_Arr = np.load("centroidCoord_Arr.npy")
#ODmaskPredicts_Arr = np.load("ODmaskPredicts_Arr.npy")
#for i in range(0,40):
showGraphsClass= False
if showGraphsClass:
import matplotlib.pyplot as plt
from sklearn import svm, datasets
def make_meshgrid(x, y, h=.02):
"""Create a mesh of points to plot in
Parameters
----------
x: data to base x-axis meshgrid on
y: data to base y-axis meshgrid on
h: stepsize for meshgrid, optional
Returns
-------
xx, yy : ndarray
"""
x_min, x_max = x.min() - 1, x.max() + 1
y_min, y_max = y.min() - 1, y.max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
return xx, yy
def plot_contours(ax, clf, xx, yy, proba=False, **params):
"""Plot the decision boundaries for a classifier.
Parameters
----------
ax: matplotlib axes object
clf: a classifier
xx: meshgrid ndarray
yy: meshgrid ndarray
params: dictionary of params to pass to contourf, optional
"""
if proba:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:,-1]
else:
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
out = ax.contourf(xx, yy, Z,20, **params)
return out
## import some data to play with
#iris = datasets.load_iris()
## Take the first two features. We could avoid this by using a two-dim dataset
#X = iris.data[:, :2]
#y = iris.target
X = X_train_2
y = y_train_2
# Plot decision regions for the already-fitted classifiers (Bayes and
# logistic regression); the commented-out SVM variants are not used here.
models = (clf_bayes, clf_log) #, clf_svm, clf_svm_rbf)
# title for the plots
titles = ('Bayes',
'Logistic regression')
''' ,
'SVC with linear kernel',
'SVM with RBF kernel')'''
# Set-up 2x2 grid for plotting.
#fig, sub =
#plt.subplots_adjust(wspace=0.4, hspace=0.4)
X0, X1 = X[0::500, 0], X[0::500, 1]
xx, yy = make_meshgrid(X0, X1,h=0.005)
'''_,ax_all = plt.subplots(1,2)
ax = ax_all[1]
plot_contours(ax, clf_bayes, xx, yy,
cmap=plt.cm.coolwarm, alpha=0.8)
ax.scatter(X0, X1, c=y[0::500], cmap=plt.cm.coolwarm, s=20)
ax.set_xlim(X0.min(), X0.max())
ax.set_ylim(X1.min(), X1.max())
ax.set_xlabel('Distance')
ax.set_ylabel('Intensity')
ax.set_xticks(())
ax.set_yticks(())
ax.set_title("Bayes")
plt.show()'''
showPlots = False
if showPlots:
for clf, title in zip(models, titles):
_,ax_all = plt.subplots(1,2)
ax = ax_all[0]
plot_contours(ax, clf, xx, yy, proba=True, # proba=True plots predict_proba instead of hard class labels
cmap=plt.cm.coolwarm, alpha=0.8)
ax.scatter(X0, X1, c=y[0::500], cmap=plt.cm.coolwarm, s=20)
ax.set_xlim(X0.min(), X0.max())
ax.set_ylim(X1.min(), X1.max())
ax.set_xlabel('Distance')
ax.set_ylabel('Intensity')
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(title)
ax = ax_all[1]
plot_contours(ax, clf, xx, yy,
cmap=plt.cm.coolwarm, alpha=0.8)
ax.scatter(X0, X1, c=y[0::500], cmap=plt.cm.coolwarm, s=20)
ax.set_xlim(X0.min(), X0.max())
ax.set_ylim(X1.min(), X1.max())
ax.set_xlabel('Distance')
ax.set_ylabel('Intensity')
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(title)
plt.show()
print("Done")
|
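Nearly every design matrix in the script above is built the same way: flatten an image (or channel) to a single column, z-score it, and stack the columns with np.column_stack. A small sketch of that recurring step, with illustrative names:

import numpy as np

def zscore_column(img):
    # Flatten an HxW image to a 1-D column and standardise it, mirroring
    # the (x - mean) / std normalisation applied to each feature above.
    col = np.asarray(img, dtype=float).reshape(-1)
    return (col - col.mean()) / col.std()

# e.g. a two-feature matrix from an intensity image and its red channel:
# X = np.column_stack((zscore_column(test_image), zscore_column(test_image_RC)))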
__init__
|
Initializes the CASE document.
Args:
graph: The graph to populate (instance of rdflib.Graph)
If not provided, a graph in memory will be used.
|
# NOTICE
#
# This software was produced for the U.S. Government under
# contract SB-1341-14-CQ-0010, and is subject to the Rights
# in Data-General Clause 52.227-14, Alt. IV (DEC 2007)
#
# (c) 2018 The MITRE Corporation. All Rights Reserved.
#====================================================
# CASE API
#!/usr/bin/env python
import datetime
import uuid
import rdflib
from rdflib import RDF
CASE = rdflib.Namespace('http://case.example.org/core#')
#====================================================
#-- CREATE A CASE DOCUMENT FOR A SINGLE REPORT
class Document(object):
# MASKED: __init__ function (lines 28-39)
def _sanitize_triple(self, triple):
"""Santizes the triple to contains pure rdflib terms."""
s, p, o = triple
if isinstance(s, Node):
s = s._node
if isinstance(o, Node):
o = o._node
elif o is not None and not isinstance(o, rdflib.term.Node):
o = rdflib.Literal(o)
if p is not None and not isinstance(p, rdflib.term.Node):
p = CASE[p]
return s, p, o
def __iter__(self):
"""Wrapper for iterating over all triples in the graph"""
return iter(self.graph)
def __contains__(self, triple):
"""Wrapper for checking if triple is contained in the graph."""
return self._sanitize_triple(triple) in self.graph
def triples(self, triple):
"""Generator over the triple store in graph."""
return self.graph.triples(self._sanitize_triple(triple))
def _json_ld_context(self):
context = dict(
(pfx, str(ns))
for (pfx, ns) in self.graph.namespaces() if pfx and
str(ns) != u"http://www.w3.org/XML/1998/namespace")
context['@vocab'] = str(CASE)
return context
# Manually specify properties to help enforce that both properties are supplied.
def create_hash(self, hashMethod, hashValue):
return self.create_Node(
CASE.Hash, bnode=True, hashMethod=hashMethod, hashValue=hashValue)
# We are going to default to json-ld instead of rdflib's default of xml.
def serialize(self, format='json-ld', **kwargs):
"""Serializes the document's graph to a destination.
(Follows the same arguments as rdflib.Graph().serialize())"""
if format == 'json-ld':
if 'context' not in kwargs:
kwargs['context'] = self._json_ld_context()
if 'auto_compact' not in kwargs:
kwargs['auto_compact'] = True
return self.graph.serialize(format=format, **kwargs)
# def serialize_append(self, format='json-ld', destination="new-api_output.json", **kwargs):
# """
# Serializes the document's graph to append to a destination file.
# """
# if format == 'json-ld':
# if 'context' not in kwargs:
# kwargs['context'] = self._json_ld_context()
# if 'auto_compact' not in kwargs:
# kwargs['auto_compact'] = True
# graph = self.graph.serialize(format=format, **kwargs)
# with open(destination, "a") as fin:
# fin.write(graph)
# fin.close()
#====================================================
#-- CREATE A CASE OBJECT
def create_Node(self, rdf_type=None, uri=None, bnode=False, **kwargs):
return Node(self.graph, rdf_type=rdf_type, uri=uri, bnode=bnode, **kwargs)
def create_CoreObject(self, _type=None, **kwargs):
"""
Creates and returns a CoreObject.
"""
return CoreObject(self.graph, rdf_type=_type, **kwargs)
def create_ContextObject(self, _type=None, **kwargs):
"""
Creates and returns a Context.
This class may not have PropertyBundles.
"""
return ContextObject(self.graph, rdf_type=_type, **kwargs)
def create_SubObject(self, _type=None, **kwargs):
"""
Creates and returns a Sub.
This class is for children of one of the above CASE classes.
This class may not have PropertyBundles.
"""
return SubObject(self.graph, rdf_type=_type, **kwargs)
def create_DuckObject(self, _type=None, **kwargs):
"""
Creates and returns a Duck.
These lonely Ducks have no parents and are fully duck-typed.
This class may not have PropertyBundles.
"""
return DuckObject(self.graph, rdf_type=_type, **kwargs)
#====================================================
#-- CASE OBJECT CLASSES
class Node(object):
"""Implements a generic node in the graph."""
RDF_TYPE = None
# Namespace to use when adding properties that are not of type rdflib.URIRef.
NAMESPACE = CASE
def __init__(self, graph, uri=None, bnode=False, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
uri: Optional string to set the URI to. (If not provided, a UUID will be generated.)
bnode: Whether to create a blank node or a uri reference.
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
super(Node, self).__init__()
if uri:
self.uri = uri
else:
self.uri = str(uuid.uuid4())
if bnode:
self._node = rdflib.BNode(self.uri)
else:
self._node = rdflib.URIRef(self.uri)
self._graph = graph
if not rdf_type:
rdf_type = self.RDF_TYPE
# Add namespace prefix to non URIRef to allow abstraction from rdflib.
if not isinstance(rdf_type, rdflib.term.Node):
rdf_type = self.NAMESPACE[rdf_type]
self.add(RDF.type, rdf_type)
for key, value in iter(kwargs.items()):
self.add(key, value)
def add(self, property, value):
"""Adds a property and its value to the node."""
# type: (object, object) -> object
# Ignore setting properties with a None value.
if value is None:
return
# Lists and other iterables as values are the equivalent of having multiple properties.
# NOTE: Lists obviously lose their order.
# TODO: Add support for ordered lists.
if isinstance(value, (list, tuple, set)):
for item in value:
self.add(property, item)
return
if isinstance(value, Node):
value = value._node
# Convert basic python datatypes to literals.
elif not isinstance(value, rdflib.term.Node):
value = rdflib.Literal(value)
# Automatically convert non-node properties to URIRef using default prefix.
if not isinstance(property, rdflib.term.Node):
property = self.NAMESPACE[property]
self._graph.add((self._node, property, value))
class CoreObject(Node):
RDF_TYPE = CASE.CoreObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(CoreObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('CoreObjectCreationTime', datetime.datetime.utcnow())
self.pb = ""
def create_PropertyBundle(self, prop_type=None, **kwargs):
"""Convenience function for adding property bundles to this Trace.
Args:
type: The @type of property bundle (can be of type rdflib.URIRef or string).
properties: Properties to add to the created property bundle.
Returns:
The property bundle created (instance of PropertyBundle).
"""
self.pb = PropertyBundle(self._graph, rdf_type=prop_type, **kwargs)
self.add(CASE.propertyBundle, self.pb)
return self.pb
class PropertyBundle(Node):
RDF_TYPE = CASE.PropertyBundle
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
# Property bundles should be blank nodes because we should be referencing them
# through CoreObjects.
self.propObj = kwargs
super(PropertyBundle, self).__init__(
graph, bnode=True, rdf_type=rdf_type, **kwargs)
class ContextObject(Node):
RDF_TYPE = CASE.ContextObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(ContextObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('ContextObjectCreationTime', datetime.datetime.utcnow())
class SubObject(Node):
RDF_TYPE = CASE.SubObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(SubObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('SubObjectCreationTime', datetime.datetime.utcnow())
class DuckObject(Node):
RDF_TYPE = CASE.DuckObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(DuckObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('DuckObjectCreationTime', datetime.datetime.utcnow())
|
def __init__(self, graph=None):
"""
Initializes the CASE document.
Args:
graph: The graph to populate (instance of rdflib.Graph)
If not provided, a graph in memory will be used.
"""
if not graph:
graph = rdflib.Graph()
graph.namespace_manager.bind('case', CASE)
self.graph = graph
| 28 | 39 |
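A short usage sketch for the constructor above, assuming the Document class from this file is importable; it shows both the default in-memory graph and a caller-supplied rdflib.Graph.

import rdflib

doc = Document()                                  # builds its own in-memory rdflib.Graph
assert isinstance(doc.graph, rdflib.Graph)
print(dict(doc.graph.namespaces()).get('case'))   # -> http://case.example.org/core#

g = rdflib.Graph()
doc2 = Document(graph=g)                          # reuse an existing graph
assert doc2.graph is g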
# NOTICE
#
# This software was produced for the U.S. Government under
# contract SB-1341-14-CQ-0010, and is subject to the Rights
# in Data-General Clause 52.227-14, Alt. IV (DEC 2007)
#
# (c) 2018 The MITRE Corporation. All Rights Reserved.
#====================================================
# CASE API
#!/usr/bin/env python
import datetime
import uuid
import rdflib
from rdflib import RDF
CASE = rdflib.Namespace('http://case.example.org/core#')
#====================================================
#-- CREATE A CASE DOCUMENT FOR A SINGLE REPORT
class Document(object):
def __init__(self, graph=None):
"""
Initializes the CASE document.
Args:
graph: The graph to populate (instance of rdflib.Graph)
If not provided, a graph in memory will be used.
"""
if not graph:
graph = rdflib.Graph()
graph.namespace_manager.bind('case', CASE)
self.graph = graph
def _sanitize_triple(self, triple):
"""Santizes the triple to contains pure rdflib terms."""
s, p, o = triple
if isinstance(s, Node):
s = s._node
if isinstance(o, Node):
o = o._node
elif o is not None and not isinstance(o, rdflib.term.Node):
o = rdflib.Literal(o)
if p is not None and not isinstance(p, rdflib.term.Node):
p = CASE[p]
return s, p, o
def __iter__(self):
"""Wrapper for iterating over all triples in the graph"""
return iter(self.graph)
def __contains__(self, triple):
"""Wrapper for checking if triple is contained in the graph."""
return self._sanitize_triple(triple) in self.graph
def triples(self, triple):
"""Generator over the triple store in graph."""
return self.graph.triples(self._sanitize_triple(triple))
def _json_ld_context(self):
context = dict(
(pfx, str(ns))
for (pfx, ns) in self.graph.namespaces() if pfx and
str(ns) != u"http://www.w3.org/XML/1998/namespace")
context['@vocab'] = str(CASE)
return context
# Manually specify properties to help enforce that both properties are supplied.
def create_hash(self, hashMethod, hashValue):
return self.create_Node(
CASE.Hash, bnode=True, hashMethod=hashMethod, hashValue=hashValue)
# We are going to default to json-ld instead of rdflib's default of xml.
def serialize(self, format='json-ld', **kwargs):
"""Serializes the document's graph to a destination.
(Follows the same arguments as rdflib.Graph().serialize())"""
if format == 'json-ld':
if 'context' not in kwargs:
kwargs['context'] = self._json_ld_context()
if 'auto_compact' not in kwargs:
kwargs['auto_compact'] = True
return self.graph.serialize(format=format, **kwargs)
# def serialize_append(self, format='json-ld', destination="new-api_output.json", **kwargs):
# """
# Serializes the document's graph to append to a destination file.
# """
# if format == 'json-ld':
# if 'context' not in kwargs:
# kwargs['context'] = self._json_ld_context()
# if 'auto_compact' not in kwargs:
# kwargs['auto_compact'] = True
# graph = self.graph.serialize(format=format, **kwargs)
# with open(destination, "a") as fin:
# fin.write(graph)
# fin.close()
#====================================================
#-- CREATE A CASE OBJECT
def create_Node(self, rdf_type=None, uri=None, bnode=False, **kwargs):
return Node(self.graph, rdf_type=rdf_type, uri=uri, bnode=bnode, **kwargs)
def create_CoreObject(self, _type=None, **kwargs):
"""
Creates and returns a CoreObject.
"""
return CoreObject(self.graph, rdf_type=_type, **kwargs)
def create_ContextObject(self, _type=None, **kwargs):
"""
Creates and returns a Context.
This class may not have PropertyBundles.
"""
return ContextObject(self.graph, rdf_type=_type, **kwargs)
def create_SubObject(self, _type=None, **kwargs):
"""
Creates and returns a Sub.
This class is for children of one of the above CASE classes.
This class may not have PropertyBundles.
"""
return SubObject(self.graph, rdf_type=_type, **kwargs)
def create_DuckObject(self, _type=None, **kwargs):
"""
Creates and returns a Duck.
These lonely Ducks have no parents and are fully duck-typed.
This class may not have PropertyBundles.
"""
return DuckObject(self.graph, rdf_type=_type, **kwargs)
#====================================================
#-- CASE OBJECT CLASSES
class Node(object):
"""Implements a generic node in the graph."""
RDF_TYPE = None
# Namespace to use when adding properties that are not of type rdflib.URIRef.
NAMESPACE = CASE
def __init__(self, graph, uri=None, bnode=False, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
uri: Optional string to set the URI to. (If not provided, a UUID will be generated.)
bnode: Whether to create a blank node or a uri reference.
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
super(Node, self).__init__()
if uri:
self.uri = uri
else:
self.uri = str(uuid.uuid4())
if bnode:
self._node = rdflib.BNode(self.uri)
else:
self._node = rdflib.URIRef(self.uri)
self._graph = graph
if not rdf_type:
rdf_type = self.RDF_TYPE
# Add namespace prefix to non URIRef to allow abstraction from rdflib.
if not isinstance(rdf_type, rdflib.term.Node):
rdf_type = self.NAMESPACE[rdf_type]
self.add(RDF.type, rdf_type)
for key, value in iter(kwargs.items()):
self.add(key, value)
def add(self, property, value):
"""Adds a property and its value to the node."""
# type: (object, object) -> object
# Ignore setting properties with a None value.
if value is None:
return
# Lists and other iterables as values are the equivalent of having multiple properties.
# NOTE: Lists obviously lose their order.
# TODO: Add support for ordered lists.
if isinstance(value, (list, tuple, set)):
for item in value:
self.add(property, item)
return
if isinstance(value, Node):
value = value._node
# Convert basic python datatypes to literals.
elif not isinstance(value, rdflib.term.Node):
value = rdflib.Literal(value)
# Automatically convert non-node properties to URIRef using default prefix.
if not isinstance(property, rdflib.term.Node):
property = self.NAMESPACE[property]
self._graph.add((self._node, property, value))
class CoreObject(Node):
RDF_TYPE = CASE.CoreObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(CoreObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('CoreObjectCreationTime', datetime.datetime.utcnow())
self.pb = ""
def create_PropertyBundle(self, prop_type=None, **kwargs):
"""Convenience function for adding property bundles to this Trace.
Args:
type: The @type of property bundle (can be of type rdflib.URIRef or string).
properties: Properties to add to the created property bundle.
Returns:
The property bundle created (instance of PropertyBundle).
"""
self.pb = PropertyBundle(self._graph, rdf_type=prop_type, **kwargs)
self.add(CASE.propertyBundle, self.pb)
return self.pb
class PropertyBundle(Node):
RDF_TYPE = CASE.PropertyBundle
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
# Property bundles should be blank nodes because we should be referencing them
# through CoreObjects.
self.propObj = kwargs
super(PropertyBundle, self).__init__(
graph, bnode=True, rdf_type=rdf_type, **kwargs)
class ContextObject(Node):
RDF_TYPE = CASE.ContextObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(ContextObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('ContextObjectCreationTime', datetime.datetime.utcnow())
class SubObject(Node):
RDF_TYPE = CASE.SubObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(SubObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('SubObjectCreationTime', datetime.datetime.utcnow())
class DuckObject(Node):
RDF_TYPE = CASE.DuckObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(DuckObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('DuckObjectCreationTime', datetime.datetime.utcnow())
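# A minimal end-to-end usage sketch of the classes above. It assumes the module
# shown here is importable as `case` (an assumed name); the 'Trace'/'File' type
# names and the property names below are illustrative, not fixed by the API.
import case

doc = case.Document()                          # backed by an in-memory rdflib.Graph

# String types are resolved against the CASE namespace, so 'Trace' becomes CASE.Trace.
trace = doc.create_CoreObject('Trace')

# Attach a property bundle (a blank node linked to the CoreObject via CASE.propertyBundle).
trace.create_PropertyBundle('File', fileName='report.pdf', sizeInBytes=2048)

# JSON-LD is the default serialization; this needs rdflib's JSON-LD support
# (built in from rdflib 6.0, or the rdflib-jsonld plugin on older versions).
print(doc.serialize())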
|
serialize
|
Serializes the document's graph to a destination.
(Follows same arguments as rdflib.Graph().serialize())
|
# NOTICE
#
# This software was produced for the U.S. Government under
# contract SB-1341-14-CQ-0010, and is subject to the Rights
# in Data-General Clause 52.227-14, Alt. IV (DEC 2007)
#
# (c) 2018 The MITRE Corporation. All Rights Reserved.
#====================================================
# CASE API
#!/usr/bin/env python
import datetime
import uuid
import rdflib
from rdflib import RDF
CASE = rdflib.Namespace('http://case.example.org/core#')
#====================================================
#-- CREATE A CASE DOCUMENT FOR A SINGLE REPORT
class Document(object):
def __init__(self, graph=None):
"""
Initializes the CASE document.
Args:
graph: The graph to populate (instance of rdflib.Graph)
If not provided, a graph in memory will be used.
"""
if not graph:
graph = rdflib.Graph()
graph.namespace_manager.bind('case', CASE)
self.graph = graph
def _sanitize_triple(self, triple):
"""Santizes the triple to contains pure rdflib terms."""
s, p, o = triple
if isinstance(s, Node):
s = s._node
if isinstance(o, Node):
o = o._node
elif o is not None and not isinstance(o, rdflib.term.Node):
o = rdflib.Literal(o)
if p is not None and not isinstance(p, rdflib.term.Node):
p = CASE[p]
return s, p, o
def __iter__(self):
"""Wrapper for iterating over all triples in the graph"""
return iter(self.graph)
def __contains__(self, triple):
"""Wrapper for checking if triple is contained in the graph."""
return self._sanitize_triple(triple) in self.graph
def triples(self, triple):
"""Generator over the triple store in graph."""
return self.graph.triples(self._sanitize_triple(triple))
def _json_ld_context(self):
context = dict(
(pfx, str(ns))
for (pfx, ns) in self.graph.namespaces() if pfx and
str(ns) != u"http://www.w3.org/XML/1998/namespace")
context['@vocab'] = str(CASE)
return context
    # Manually specify properties to help enforce that both properties are supplied.
def create_hash(self, hashMethod, hashValue):
return self.create_Node(
CASE.Hash, bnode=True, hashMethod=hashMethod, hashValue=hashValue)
# We are going to default to json-ld instead of rdflib's default of xml.
# MASKED: serialize function (lines 89-97)
# def serialize_append(self, format='json-ld', destination="new-api_output.json", **kwargs):
# """
# Serializes the document's graph to append to a destination file.
# """
# if format == 'json-ld':
# if 'context' not in kwargs:
# kwargs['context'] = self._json_ld_context()
# if 'auto_compact' not in kwargs:
# kwargs['auto_compact'] = True
# graph = self.graph.serialize(format=format, **kwargs)
# with open(destination, "a") as fin:
# fin.write(graph)
# fin.close()
#====================================================
#-- CREATE A CASE OBJECT
def create_Node(self, rdf_type=None, uri=None, bnode=False, **kwargs):
return Node(self.graph, rdf_type=rdf_type, uri=uri, bnode=bnode, **kwargs)
def create_CoreObject(self, _type=None, **kwargs):
"""
Creates and returns a CoreObject.
"""
return CoreObject(self.graph, rdf_type=_type, **kwargs)
def create_ContextObject(self, _type=None, **kwargs):
"""
Creates and returns a Context.
This class may not have PropertyBundles.
"""
return ContextObject(self.graph, rdf_type=_type, **kwargs)
def create_SubObject(self, _type=None, **kwargs):
"""
Creates and returns a Sub.
This class is for children of one of the above CASE classes.
This class may not have PropertyBundles.
"""
return SubObject(self.graph, rdf_type=_type, **kwargs)
def create_DuckObject(self, _type=None, **kwargs):
"""
Creates and returns a Duck.
These lonely Ducks have no parents and are fully duck-typed.
This class may not have PropertyBundles.
"""
return DuckObject(self.graph, rdf_type=_type, **kwargs)
#====================================================
#-- CASE OBJECT CLASSES
class Node(object):
"""Implements a generic node in the graph."""
RDF_TYPE = None
# Namespace to use when adding properties that are not of type rdflib.URIRef.
NAMESPACE = CASE
def __init__(self, graph, uri=None, bnode=False, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
            uri: Optional string to set the URI to. (If not provided, a UUID will be generated.)
bnode: Whether to create a blank node or a uri reference.
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
super(Node, self).__init__()
if uri:
self.uri = uri
else:
self.uri = str(uuid.uuid4())
if bnode:
self._node = rdflib.BNode(self.uri)
else:
self._node = rdflib.URIRef(self.uri)
self._graph = graph
if not rdf_type:
rdf_type = self.RDF_TYPE
        # Add the namespace prefix to non-URIRef types to allow abstraction from rdflib.
if not isinstance(rdf_type, rdflib.term.Node):
rdf_type = self.NAMESPACE[rdf_type]
self.add(RDF.type, rdf_type)
for key, value in iter(kwargs.items()):
self.add(key, value)
def add(self, property, value):
"""Adds a property and its value to the node."""
# type: (object, object) -> object
# Ignore setting properties with a None value.
if value is None:
return
        # Lists and other iterables as values are the equivalent of having multiple properties.
# NOTE: Lists obviously lose their order.
# TODO: Add support for ordered lists.
if isinstance(value, (list, tuple, set)):
for item in value:
self.add(property, item)
return
if isinstance(value, Node):
value = value._node
# Convert basic python datatypes to literals.
elif not isinstance(value, rdflib.term.Node):
value = rdflib.Literal(value)
# Automatically convert non-node properties to URIRef using default prefix.
if not isinstance(property, rdflib.term.Node):
property = self.NAMESPACE[property]
self._graph.add((self._node, property, value))
class CoreObject(Node):
RDF_TYPE = CASE.CoreObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(CoreObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('CoreObjectCreationTime', datetime.datetime.utcnow())
self.pb = ""
def create_PropertyBundle(self, prop_type=None, **kwargs):
"""Convenience function for adding property bundles to this Trace.
Args:
type: The @type of property bundle (can be of type rdflib.URIRef or string).
properties: Properties to add to the created property bundle.
Returns:
The property bundle created (instance of PropertyBundle).
"""
self.pb = PropertyBundle(self._graph, rdf_type=prop_type, **kwargs)
self.add(CASE.propertyBundle, self.pb)
return self.pb
class PropertyBundle(Node):
RDF_TYPE = CASE.PropertyBundle
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
# Property bundles should be blank nodes because we should be referencing them
# through CoreObjects.
self.propObj = kwargs
super(PropertyBundle, self).__init__(
graph, bnode=True, rdf_type=rdf_type, **kwargs)
class ContextObject(Node):
RDF_TYPE = CASE.ContextObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(ContextObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('ContextObjectCreationTime', datetime.datetime.utcnow())
class SubObject(Node):
RDF_TYPE = CASE.SubObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(SubObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('SubObjectCreationTime', datetime.datetime.utcnow())
class DuckObject(Node):
RDF_TYPE = CASE.DuckObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(DuckObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('DuckObjectCreationTime', datetime.datetime.utcnow())
|
def serialize(self, format='json-ld', **kwargs):
"""Serializes the document's graph to a destination.
(Follows same arguments as rdflib.Graph().serialize())"""
if format == 'json-ld':
if 'context' not in kwargs:
kwargs['context'] = self._json_ld_context()
if 'auto_compact' not in kwargs:
kwargs['auto_compact'] = True
return self.graph.serialize(format=format, **kwargs)
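# A small sketch of calling serialize(), assuming the full module is importable
# as `case` (an assumed name); 'Trace' and 'description' are illustrative. Extra
# keyword arguments such as `destination` are passed straight through to
# rdflib.Graph.serialize().
import case

doc = case.Document()
doc.create_CoreObject('Trace', description='example object')

jsonld_text = doc.serialize()                  # returns the JSON-LD text (bytes on older rdflib)
doc.serialize(destination='output.json')       # or let rdflib write it to a file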
| 89 | 97 |
# NOTICE
#
# This software was produced for the U.S. Government under
# contract SB-1341-14-CQ-0010, and is subject to the Rights
# in Data-General Clause 52.227-14, Alt. IV (DEC 2007)
#
# (c) 2018 The MITRE Corporation. All Rights Reserved.
#====================================================
# CASE API
#!/usr/bin/env python
import datetime
import uuid
import rdflib
from rdflib import RDF
CASE = rdflib.Namespace('http://case.example.org/core#')
#====================================================
#-- CREATE A CASE DOCUMENT FOR A SINGLE REPORT
class Document(object):
def __init__(self, graph=None):
"""
Initializes the CASE document.
Args:
graph: The graph to populate (instance of rdflib.Graph)
If not provided, a graph in memory will be used.
"""
if not graph:
graph = rdflib.Graph()
graph.namespace_manager.bind('case', CASE)
self.graph = graph
def _sanitize_triple(self, triple):
"""Santizes the triple to contains pure rdflib terms."""
s, p, o = triple
if isinstance(s, Node):
s = s._node
if isinstance(o, Node):
o = o._node
elif o is not None and not isinstance(o, rdflib.term.Node):
o = rdflib.Literal(o)
if p is not None and not isinstance(p, rdflib.term.Node):
p = CASE[p]
return s, p, o
def __iter__(self):
"""Wrapper for iterating over all triples in the graph"""
return iter(self.graph)
def __contains__(self, triple):
"""Wrapper for checking if triple is contained in the graph."""
return self._sanitize_triple(triple) in self.graph
def triples(self, triple):
"""Generator over the triple store in graph."""
return self.graph.triples(self._sanitize_triple(triple))
def _json_ld_context(self):
context = dict(
(pfx, str(ns))
for (pfx, ns) in self.graph.namespaces() if pfx and
str(ns) != u"http://www.w3.org/XML/1998/namespace")
context['@vocab'] = str(CASE)
return context
    # Manually specify properties to help enforce that both properties are supplied.
def create_hash(self, hashMethod, hashValue):
return self.create_Node(
CASE.Hash, bnode=True, hashMethod=hashMethod, hashValue=hashValue)
# We are going to default to json-ld instead of rdflib's default of xml.
def serialize(self, format='json-ld', **kwargs):
"""Serializes the document's graph to a destination.
(Follows same arguments as rdflib.Graph().serialize())"""
if format == 'json-ld':
if 'context' not in kwargs:
kwargs['context'] = self._json_ld_context()
if 'auto_compact' not in kwargs:
kwargs['auto_compact'] = True
return self.graph.serialize(format=format, **kwargs)
# def serialize_append(self, format='json-ld', destination="new-api_output.json", **kwargs):
# """
# Serializes the document's graph to append to a destination file.
# """
# if format == 'json-ld':
# if 'context' not in kwargs:
# kwargs['context'] = self._json_ld_context()
# if 'auto_compact' not in kwargs:
# kwargs['auto_compact'] = True
# graph = self.graph.serialize(format=format, **kwargs)
# with open(destination, "a") as fin:
# fin.write(graph)
# fin.close()
#====================================================
#-- CREATE A CASE OBJECT
def create_Node(self, rdf_type=None, uri=None, bnode=False, **kwargs):
return Node(self.graph, rdf_type=rdf_type, uri=uri, bnode=bnode, **kwargs)
def create_CoreObject(self, _type=None, **kwargs):
"""
Creates and returns a CoreObject.
"""
return CoreObject(self.graph, rdf_type=_type, **kwargs)
def create_ContextObject(self, _type=None, **kwargs):
"""
Creates and returns a Context.
This class may not have PropertyBundles.
"""
return ContextObject(self.graph, rdf_type=_type, **kwargs)
def create_SubObject(self, _type=None, **kwargs):
"""
Creates and returns a Sub.
This class is for children of one of the above CASE classes.
This class may not have PropertyBundles.
"""
return SubObject(self.graph, rdf_type=_type, **kwargs)
def create_DuckObject(self, _type=None, **kwargs):
"""
Creates and returns a Duck.
These lonely Ducks have no parents and are fully duck-typed.
This class may not have PropertyBundles.
"""
return DuckObject(self.graph, rdf_type=_type, **kwargs)
#====================================================
#-- CASE OBJECT CLASSES
class Node(object):
"""Implements a generic node in the graph."""
RDF_TYPE = None
# Namespace to use when adding properties that are not of type rdflib.URIRef.
NAMESPACE = CASE
def __init__(self, graph, uri=None, bnode=False, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
            uri: Optional string to set the URI to. (If not provided, a UUID will be generated.)
bnode: Whether to create a blank node or a uri reference.
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
super(Node, self).__init__()
if uri:
self.uri = uri
else:
self.uri = str(uuid.uuid4())
if bnode:
self._node = rdflib.BNode(self.uri)
else:
self._node = rdflib.URIRef(self.uri)
self._graph = graph
if not rdf_type:
rdf_type = self.RDF_TYPE
        # Add the namespace prefix to non-URIRef types to allow abstraction from rdflib.
if not isinstance(rdf_type, rdflib.term.Node):
rdf_type = self.NAMESPACE[rdf_type]
self.add(RDF.type, rdf_type)
for key, value in iter(kwargs.items()):
self.add(key, value)
def add(self, property, value):
"""Adds a property and its value to the node."""
# type: (object, object) -> object
# Ignore setting properties with a None value.
if value is None:
return
        # Lists and other iterables as values are the equivalent of having multiple properties.
# NOTE: Lists obviously lose their order.
# TODO: Add support for ordered lists.
if isinstance(value, (list, tuple, set)):
for item in value:
self.add(property, item)
return
if isinstance(value, Node):
value = value._node
# Convert basic python datatypes to literals.
elif not isinstance(value, rdflib.term.Node):
value = rdflib.Literal(value)
# Automatically convert non-node properties to URIRef using default prefix.
if not isinstance(property, rdflib.term.Node):
property = self.NAMESPACE[property]
self._graph.add((self._node, property, value))
class CoreObject(Node):
RDF_TYPE = CASE.CoreObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(CoreObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('CoreObjectCreationTime', datetime.datetime.utcnow())
self.pb = ""
def create_PropertyBundle(self, prop_type=None, **kwargs):
"""Convenience function for adding property bundles to this Trace.
Args:
type: The @type of property bundle (can be of type rdflib.URIRef or string).
properties: Properties to add to the created property bundle.
Returns:
The property bundle created (instance of PropertyBundle).
"""
self.pb = PropertyBundle(self._graph, rdf_type=prop_type, **kwargs)
self.add(CASE.propertyBundle, self.pb)
return self.pb
class PropertyBundle(Node):
RDF_TYPE = CASE.PropertyBundle
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
# Property bundles should be blank nodes because we should be referencing them
# through CoreObjects.
self.propObj = kwargs
super(PropertyBundle, self).__init__(
graph, bnode=True, rdf_type=rdf_type, **kwargs)
class ContextObject(Node):
RDF_TYPE = CASE.ContextObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(ContextObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('ContextObjectCreationTime', datetime.datetime.utcnow())
class SubObject(Node):
RDF_TYPE = CASE.SubObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(SubObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('SubObjectCreationTime', datetime.datetime.utcnow())
class DuckObject(Node):
RDF_TYPE = CASE.DuckObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(DuckObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('DuckObjectCreationTime', datetime.datetime.utcnow())
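# A sketch of how Document._sanitize_triple() lets queries mix Node instances
# and plain strings, assuming the module is importable as `case` (an assumed
# name); 'Trace' and 'description' are illustrative.
import case

doc = case.Document()
obj = doc.create_CoreObject('Trace', description='hello')

# Strings are mapped to CASE terms / literals before the lookup.
print((obj, 'description', 'hello') in doc)    # True

# None components are left untouched and act as wildcards in triples().
for s, p, o in doc.triples((obj, 'description', None)):
    print(s, p, o)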
|
__init__
|
Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
    uri: Optional string to set the URI to. (If not provided, a UUID will be generated.)
bnode: Whether to create a blank node or a uri reference.
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
|
# NOTICE
#
# This software was produced for the U.S. Government under
# contract SB-1341-14-CQ-0010, and is subject to the Rights
# in Data-General Clause 52.227-14, Alt. IV (DEC 2007)
#
# (c) 2018 The MITRE Corporation. All Rights Reserved.
#====================================================
# CASE API
#!/usr/bin/env python
import datetime
import uuid
import rdflib
from rdflib import RDF
CASE = rdflib.Namespace('http://case.example.org/core#')
#====================================================
#-- CREATE A CASE DOCUMENT FOR A SINGLE REPORT
class Document(object):
def __init__(self, graph=None):
"""
Initializes the CASE document.
Args:
graph: The graph to populate (instance of rdflib.Graph)
If not provided, a graph in memory will be used.
"""
if not graph:
graph = rdflib.Graph()
graph.namespace_manager.bind('case', CASE)
self.graph = graph
def _sanitize_triple(self, triple):
"""Santizes the triple to contains pure rdflib terms."""
s, p, o = triple
if isinstance(s, Node):
s = s._node
if isinstance(o, Node):
o = o._node
elif o is not None and not isinstance(o, rdflib.term.Node):
o = rdflib.Literal(o)
if p is not None and not isinstance(p, rdflib.term.Node):
p = CASE[p]
return s, p, o
def __iter__(self):
"""Wrapper for iterating over all triples in the graph"""
return iter(self.graph)
def __contains__(self, triple):
"""Wrapper for checking if triple is contained in the graph."""
return self._sanitize_triple(triple) in self.graph
def triples(self, triple):
"""Generator over the triple store in graph."""
return self.graph.triples(self._sanitize_triple(triple))
def _json_ld_context(self):
context = dict(
(pfx, str(ns))
for (pfx, ns) in self.graph.namespaces() if pfx and
str(ns) != u"http://www.w3.org/XML/1998/namespace")
context['@vocab'] = str(CASE)
return context
    # Manually specify properties to help enforce that both properties are supplied.
def create_hash(self, hashMethod, hashValue):
return self.create_Node(
CASE.Hash, bnode=True, hashMethod=hashMethod, hashValue=hashValue)
# We are going to default to json-ld instead of rdflib's default of xml.
def serialize(self, format='json-ld', **kwargs):
"""Serializes the document's graph to a destination.
(Follows same arguments as rdflib.Graph().serialize())"""
if format == 'json-ld':
if 'context' not in kwargs:
kwargs['context'] = self._json_ld_context()
if 'auto_compact' not in kwargs:
kwargs['auto_compact'] = True
return self.graph.serialize(format=format, **kwargs)
# def serialize_append(self, format='json-ld', destination="new-api_output.json", **kwargs):
# """
# Serializes the document's graph to append to a destination file.
# """
# if format == 'json-ld':
# if 'context' not in kwargs:
# kwargs['context'] = self._json_ld_context()
# if 'auto_compact' not in kwargs:
# kwargs['auto_compact'] = True
# graph = self.graph.serialize(format=format, **kwargs)
# with open(destination, "a") as fin:
# fin.write(graph)
# fin.close()
#====================================================
#-- CREATE A CASE OBJECT
def create_Node(self, rdf_type=None, uri=None, bnode=False, **kwargs):
return Node(self.graph, rdf_type=rdf_type, uri=uri, bnode=bnode, **kwargs)
def create_CoreObject(self, _type=None, **kwargs):
"""
Creates and returns a CoreObject.
"""
return CoreObject(self.graph, rdf_type=_type, **kwargs)
def create_ContextObject(self, _type=None, **kwargs):
"""
Creates and returns a Context.
This class may not have PropertyBundles.
"""
return ContextObject(self.graph, rdf_type=_type, **kwargs)
def create_SubObject(self, _type=None, **kwargs):
"""
Creates and returns a Sub.
This class is for children of one of the above CASE classes.
This class may not have PropertyBundles.
"""
return SubObject(self.graph, rdf_type=_type, **kwargs)
def create_DuckObject(self, _type=None, **kwargs):
"""
Creates and returns a Duck.
These lonely Ducks have no parents and are fully duck-typed.
This class may not have PropertyBundles.
"""
return DuckObject(self.graph, rdf_type=_type, **kwargs)
#====================================================
#-- CASE OBJECT CLASSES
class Node(object):
"""Implements a generic node in the graph."""
RDF_TYPE = None
# Namespace to use when adding properties that are not of type rdflib.URIRef.
NAMESPACE = CASE
# MASKED: __init__ function (lines 161-194)
def add(self, property, value):
"""Adds a property and its value to the node."""
# type: (object, object) -> object
# Ignore setting properties with a None value.
if value is None:
return
        # Lists and other iterables as values are the equivalent of having multiple properties.
# NOTE: Lists obviously lose their order.
# TODO: Add support for ordered lists.
if isinstance(value, (list, tuple, set)):
for item in value:
self.add(property, item)
return
if isinstance(value, Node):
value = value._node
# Convert basic python datatypes to literals.
elif not isinstance(value, rdflib.term.Node):
value = rdflib.Literal(value)
# Automatically convert non-node properties to URIRef using default prefix.
if not isinstance(property, rdflib.term.Node):
property = self.NAMESPACE[property]
self._graph.add((self._node, property, value))
class CoreObject(Node):
RDF_TYPE = CASE.CoreObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(CoreObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('CoreObjectCreationTime', datetime.datetime.utcnow())
self.pb = ""
def create_PropertyBundle(self, prop_type=None, **kwargs):
"""Convenience function for adding property bundles to this Trace.
Args:
type: The @type of property bundle (can be of type rdflib.URIRef or string).
properties: Properties to add to the created property bundle.
Returns:
The property bundle created (instance of PropertyBundle).
"""
self.pb = PropertyBundle(self._graph, rdf_type=prop_type, **kwargs)
self.add(CASE.propertyBundle, self.pb)
return self.pb
class PropertyBundle(Node):
RDF_TYPE = CASE.PropertyBundle
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
# Property bundles should be blank nodes because we should be referencing them
# through CoreObjects.
self.propObj = kwargs
super(PropertyBundle, self).__init__(
graph, bnode=True, rdf_type=rdf_type, **kwargs)
class ContextObject(Node):
RDF_TYPE = CASE.ContextObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(ContextObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('ContextObjectCreationTime', datetime.datetime.utcnow())
class SubObject(Node):
RDF_TYPE = CASE.SubObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(SubObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('SubObjectCreationTime', datetime.datetime.utcnow())
class DuckObject(Node):
RDF_TYPE = CASE.DuckObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(DuckObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('DuckObjectCreationTime', datetime.datetime.utcnow())
|
def __init__(self, graph, uri=None, bnode=False, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
            uri: Optional string to set the URI to. (If not provided, a UUID will be generated.)
bnode: Whether to create a blank node or a uri reference.
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
super(Node, self).__init__()
if uri:
self.uri = uri
else:
self.uri = str(uuid.uuid4())
if bnode:
self._node = rdflib.BNode(self.uri)
else:
self._node = rdflib.URIRef(self.uri)
self._graph = graph
if not rdf_type:
rdf_type = self.RDF_TYPE
        # Add the namespace prefix to non-URIRef types to allow abstraction from rdflib.
if not isinstance(rdf_type, rdflib.term.Node):
rdf_type = self.NAMESPACE[rdf_type]
self.add(RDF.type, rdf_type)
for key, value in iter(kwargs.items()):
self.add(key, value)
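# A sketch of the two node flavours produced by __init__ above, assuming the
# module is importable as `case` (an assumed name) and 'Tool' is an illustrative
# type. With bnode=False the node is backed by an rdflib.URIRef (a random UUID
# string when no uri is given); with bnode=True it is backed by an rdflib.BNode.
# The private `_node` attribute is inspected here purely for illustration.
import rdflib
import case

doc = case.Document()

named = doc.create_Node(rdf_type='Tool', uri='http://example.org/tool/1')
anon = doc.create_hash('SHA256', 'deadbeef')   # create_hash() always uses bnode=True

print(isinstance(named._node, rdflib.URIRef))  # True
print(isinstance(anon._node, rdflib.BNode))    # True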
| 161 | 194 |
# NOTICE
#
# This software was produced for the U.S. Government under
# contract SB-1341-14-CQ-0010, and is subject to the Rights
# in Data-General Clause 52.227-14, Alt. IV (DEC 2007)
#
# (c) 2018 The MITRE Corporation. All Rights Reserved.
#====================================================
# CASE API
#!/usr/bin/env python
import datetime
import uuid
import rdflib
from rdflib import RDF
CASE = rdflib.Namespace('http://case.example.org/core#')
#====================================================
#-- CREATE A CASE DOCUMENT FOR A SINGLE REPORT
class Document(object):
def __init__(self, graph=None):
"""
Initializes the CASE document.
Args:
graph: The graph to populate (instance of rdflib.Graph)
If not provided, a graph in memory will be used.
"""
if not graph:
graph = rdflib.Graph()
graph.namespace_manager.bind('case', CASE)
self.graph = graph
def _sanitize_triple(self, triple):
"""Santizes the triple to contains pure rdflib terms."""
s, p, o = triple
if isinstance(s, Node):
s = s._node
if isinstance(o, Node):
o = o._node
elif o is not None and not isinstance(o, rdflib.term.Node):
o = rdflib.Literal(o)
if p is not None and not isinstance(p, rdflib.term.Node):
p = CASE[p]
return s, p, o
def __iter__(self):
"""Wrapper for iterating over all triples in the graph"""
return iter(self.graph)
def __contains__(self, triple):
"""Wrapper for checking if triple is contained in the graph."""
return self._sanitize_triple(triple) in self.graph
def triples(self, triple):
"""Generator over the triple store in graph."""
return self.graph.triples(self._sanitize_triple(triple))
def _json_ld_context(self):
context = dict(
(pfx, str(ns))
for (pfx, ns) in self.graph.namespaces() if pfx and
str(ns) != u"http://www.w3.org/XML/1998/namespace")
context['@vocab'] = str(CASE)
return context
    # Manually specify properties to help enforce that both properties are supplied.
def create_hash(self, hashMethod, hashValue):
return self.create_Node(
CASE.Hash, bnode=True, hashMethod=hashMethod, hashValue=hashValue)
# We are going to default to json-ld instead of rdflib's default of xml.
def serialize(self, format='json-ld', **kwargs):
"""Serializes the document's graph to a destination.
(Follows same arguments as rdflib.Graph().serialize())"""
if format == 'json-ld':
if 'context' not in kwargs:
kwargs['context'] = self._json_ld_context()
if 'auto_compact' not in kwargs:
kwargs['auto_compact'] = True
return self.graph.serialize(format=format, **kwargs)
# def serialize_append(self, format='json-ld', destination="new-api_output.json", **kwargs):
# """
# Serializes the document's graph to append to a destination file.
# """
# if format == 'json-ld':
# if 'context' not in kwargs:
# kwargs['context'] = self._json_ld_context()
# if 'auto_compact' not in kwargs:
# kwargs['auto_compact'] = True
# graph = self.graph.serialize(format=format, **kwargs)
# with open(destination, "a") as fin:
# fin.write(graph)
# fin.close()
#====================================================
#-- CREATE A CASE OBJECT
def create_Node(self, rdf_type=None, uri=None, bnode=False, **kwargs):
return Node(self.graph, rdf_type=rdf_type, uri=uri, bnode=bnode, **kwargs)
def create_CoreObject(self, _type=None, **kwargs):
"""
Creates and returns a CoreObject.
"""
return CoreObject(self.graph, rdf_type=_type, **kwargs)
def create_ContextObject(self, _type=None, **kwargs):
"""
Creates and returns a Context.
This class may not have PropertyBundles.
"""
return ContextObject(self.graph, rdf_type=_type, **kwargs)
def create_SubObject(self, _type=None, **kwargs):
"""
Creates and returns a Sub.
This class is for children of one of the above CASE classes.
This class may not have PropertyBundles.
"""
return SubObject(self.graph, rdf_type=_type, **kwargs)
def create_DuckObject(self, _type=None, **kwargs):
"""
Creates and returns a Duck.
These lonely Ducks have no parents and are fully duck-typed.
This class may not have PropertyBundles.
"""
return DuckObject(self.graph, rdf_type=_type, **kwargs)
#====================================================
#-- CASE OBJECT CLASSES
class Node(object):
"""Implements a generic node in the graph."""
RDF_TYPE = None
# Namespace to use when adding properties that are not of type rdflib.URIRef.
NAMESPACE = CASE
def __init__(self, graph, uri=None, bnode=False, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
            uri: Optional string to set the URI to. (If not provided, a UUID will be generated.)
bnode: Whether to create a blank node or a uri reference.
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
super(Node, self).__init__()
if uri:
self.uri = uri
else:
self.uri = str(uuid.uuid4())
if bnode:
self._node = rdflib.BNode(self.uri)
else:
self._node = rdflib.URIRef(self.uri)
self._graph = graph
if not rdf_type:
rdf_type = self.RDF_TYPE
        # Add the namespace prefix to non-URIRef types to allow abstraction from rdflib.
if not isinstance(rdf_type, rdflib.term.Node):
rdf_type = self.NAMESPACE[rdf_type]
self.add(RDF.type, rdf_type)
for key, value in iter(kwargs.items()):
self.add(key, value)
def add(self, property, value):
"""Adds a property and its value to the node."""
# type: (object, object) -> object
# Ignore setting properties with a None value.
if value is None:
return
        # Lists and other iterables as values are the equivalent of having multiple properties.
# NOTE: Lists obviously lose their order.
# TODO: Add support for ordered lists.
if isinstance(value, (list, tuple, set)):
for item in value:
self.add(property, item)
return
if isinstance(value, Node):
value = value._node
# Convert basic python datatypes to literals.
elif not isinstance(value, rdflib.term.Node):
value = rdflib.Literal(value)
# Automatically convert non-node properties to URIRef using default prefix.
if not isinstance(property, rdflib.term.Node):
property = self.NAMESPACE[property]
self._graph.add((self._node, property, value))
class CoreObject(Node):
RDF_TYPE = CASE.CoreObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(CoreObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('CoreObjectCreationTime', datetime.datetime.utcnow())
self.pb = ""
def create_PropertyBundle(self, prop_type=None, **kwargs):
"""Convenience function for adding property bundles to this Trace.
Args:
type: The @type of property bundle (can be of type rdflib.URIRef or string).
properties: Properties to add to the created property bundle.
Returns:
The property bundle created (instance of PropertyBundle).
"""
self.pb = PropertyBundle(self._graph, rdf_type=prop_type, **kwargs)
self.add(CASE.propertyBundle, self.pb)
return self.pb
class PropertyBundle(Node):
RDF_TYPE = CASE.PropertyBundle
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
# Property bundles should be blank nodes because we should be referencing them
# through CoreObjects.
self.propObj = kwargs
super(PropertyBundle, self).__init__(
graph, bnode=True, rdf_type=rdf_type, **kwargs)
class ContextObject(Node):
RDF_TYPE = CASE.ContextObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(ContextObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('ContextObjectCreationTime', datetime.datetime.utcnow())
class SubObject(Node):
RDF_TYPE = CASE.SubObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(SubObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('SubObjectCreationTime', datetime.datetime.utcnow())
class DuckObject(Node):
RDF_TYPE = CASE.DuckObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(DuckObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('DuckObjectCreationTime', datetime.datetime.utcnow())
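# A sketch of how Node.add() fans an iterable value out into one triple per item
# (order is not preserved), assuming the module is importable as `case` (an
# assumed name); 'Trace' and 'tag' are illustrative.
import case

doc = case.Document()
obj = doc.create_CoreObject('Trace')

obj.add('tag', ['alpha', 'beta', 'gamma'])     # one call, three triples

print(len(list(doc.triples((obj, 'tag', None)))))   # 3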
|
__init__
|
Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
|
# NOTICE
#
# This software was produced for the U.S. Government under
# contract SB-1341-14-CQ-0010, and is subject to the Rights
# in Data-General Clause 52.227-14, Alt. IV (DEC 2007)
#
# (c) 2018 The MITRE Corporation. All Rights Reserved.
#====================================================
# CASE API
#!/usr/bin/env python
import datetime
import uuid
import rdflib
from rdflib import RDF
CASE = rdflib.Namespace('http://case.example.org/core#')
#====================================================
#-- CREATE A CASE DOCUMENT FOR A SINGLE REPORT
class Document(object):
def __init__(self, graph=None):
"""
Initializes the CASE document.
Args:
graph: The graph to populate (instance of rdflib.Graph)
If not provided, a graph in memory will be used.
"""
if not graph:
graph = rdflib.Graph()
graph.namespace_manager.bind('case', CASE)
self.graph = graph
def _sanitize_triple(self, triple):
"""Santizes the triple to contains pure rdflib terms."""
s, p, o = triple
if isinstance(s, Node):
s = s._node
if isinstance(o, Node):
o = o._node
elif o is not None and not isinstance(o, rdflib.term.Node):
o = rdflib.Literal(o)
if p is not None and not isinstance(p, rdflib.term.Node):
p = CASE[p]
return s, p, o
def __iter__(self):
"""Wrapper for iterating over all triples in the graph"""
return iter(self.graph)
def __contains__(self, triple):
"""Wrapper for checking if triple is contained in the graph."""
return self._sanitize_triple(triple) in self.graph
def triples(self, triple):
"""Generator over the triple store in graph."""
return self.graph.triples(self._sanitize_triple(triple))
def _json_ld_context(self):
context = dict(
(pfx, str(ns))
for (pfx, ns) in self.graph.namespaces() if pfx and
str(ns) != u"http://www.w3.org/XML/1998/namespace")
context['@vocab'] = str(CASE)
return context
    # Manually specify properties to help enforce that both properties are supplied.
def create_hash(self, hashMethod, hashValue):
return self.create_Node(
CASE.Hash, bnode=True, hashMethod=hashMethod, hashValue=hashValue)
# We are going to default to json-ld instead of rdflib's default of xml.
def serialize(self, format='json-ld', **kwargs):
"""Serializes the document's graph to a destination.
(Follows same arguments as rdflib.Graph().serialize())"""
if format == 'json-ld':
if 'context' not in kwargs:
kwargs['context'] = self._json_ld_context()
if 'auto_compact' not in kwargs:
kwargs['auto_compact'] = True
return self.graph.serialize(format=format, **kwargs)
# def serialize_append(self, format='json-ld', destination="new-api_output.json", **kwargs):
# """
# Serializes the document's graph to append to a destination file.
# """
# if format == 'json-ld':
# if 'context' not in kwargs:
# kwargs['context'] = self._json_ld_context()
# if 'auto_compact' not in kwargs:
# kwargs['auto_compact'] = True
# graph = self.graph.serialize(format=format, **kwargs)
# with open(destination, "a") as fin:
# fin.write(graph)
# fin.close()
#====================================================
#-- CREATE A CASE OBJECT
def create_Node(self, rdf_type=None, uri=None, bnode=False, **kwargs):
return Node(self.graph, rdf_type=rdf_type, uri=uri, bnode=bnode, **kwargs)
def create_CoreObject(self, _type=None, **kwargs):
"""
Creates and returns a CoreObject.
"""
return CoreObject(self.graph, rdf_type=_type, **kwargs)
def create_ContextObject(self, _type=None, **kwargs):
"""
Creates and returns a Context.
This class may not have PropertyBundles.
"""
return ContextObject(self.graph, rdf_type=_type, **kwargs)
def create_SubObject(self, _type=None, **kwargs):
"""
Creates and returns a Sub.
This class is for children of one of the above CASE classes.
This class may not have PropertyBundles.
"""
return SubObject(self.graph, rdf_type=_type, **kwargs)
def create_DuckObject(self, _type=None, **kwargs):
"""
Creates and returns a Duck.
These lonely Ducks have no parents and are fully duck-typed.
This class may not have PropertyBundles.
"""
return DuckObject(self.graph, rdf_type=_type, **kwargs)
#====================================================
#-- CASE OBJECT CLASSES
class Node(object):
"""Implements a generic node in the graph."""
RDF_TYPE = None
# Namespace to use when adding properties that are not of type rdflib.URIRef.
NAMESPACE = CASE
def __init__(self, graph, uri=None, bnode=False, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
            uri: Optional string to set the URI to. (If not provided, a UUID will be generated.)
bnode: Whether to create a blank node or a uri reference.
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
super(Node, self).__init__()
if uri:
self.uri = uri
else:
self.uri = str(uuid.uuid4())
if bnode:
self._node = rdflib.BNode(self.uri)
else:
self._node = rdflib.URIRef(self.uri)
self._graph = graph
if not rdf_type:
rdf_type = self.RDF_TYPE
        # Add the namespace prefix to non-URIRef types to allow abstraction from rdflib.
if not isinstance(rdf_type, rdflib.term.Node):
rdf_type = self.NAMESPACE[rdf_type]
self.add(RDF.type, rdf_type)
for key, value in iter(kwargs.items()):
self.add(key, value)
def add(self, property, value):
"""Adds a property and its value to the node."""
# type: (object, object) -> object
# Ignore setting properties with a None value.
if value is None:
return
        # Lists and other iterables as values are the equivalent of having multiple properties.
# NOTE: Lists obviously lose their order.
# TODO: Add support for ordered lists.
if isinstance(value, (list, tuple, set)):
for item in value:
self.add(property, item)
return
if isinstance(value, Node):
value = value._node
# Convert basic python datatypes to literals.
elif not isinstance(value, rdflib.term.Node):
value = rdflib.Literal(value)
# Automatically convert non-node properties to URIRef using default prefix.
if not isinstance(property, rdflib.term.Node):
property = self.NAMESPACE[property]
self._graph.add((self._node, property, value))
class CoreObject(Node):
RDF_TYPE = CASE.CoreObject
# MASKED: __init__ function (lines 230-246)
def create_PropertyBundle(self, prop_type=None, **kwargs):
"""Convenience function for adding property bundles to this Trace.
Args:
type: The @type of property bundle (can be of type rdflib.URIRef or string).
properties: Properties to add to the created property bundle.
Returns:
The property bundle created (instance of PropertyBundle).
"""
self.pb = PropertyBundle(self._graph, rdf_type=prop_type, **kwargs)
self.add(CASE.propertyBundle, self.pb)
return self.pb
class PropertyBundle(Node):
RDF_TYPE = CASE.PropertyBundle
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
# Property bundles should be blank nodes because we should be referencing them
# through CoreObjects.
self.propObj = kwargs
super(PropertyBundle, self).__init__(
graph, bnode=True, rdf_type=rdf_type, **kwargs)
class ContextObject(Node):
RDF_TYPE = CASE.ContextObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(ContextObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('ContextObjectCreationTime', datetime.datetime.utcnow())
class SubObject(Node):
RDF_TYPE = CASE.SubObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(SubObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('SubObjectCreationTime', datetime.datetime.utcnow())
class DuckObject(Node):
RDF_TYPE = CASE.DuckObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(DuckObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('DuckObjectCreationTime', datetime.datetime.utcnow())
|
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(CoreObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('CoreObjectCreationTime', datetime.datetime.utcnow())
self.pb = ""
| 230 | 246 |
# NOTICE
#
# This software was produced for the U.S. Government under
# contract SB-1341-14-CQ-0010, and is subject to the Rights
# in Data-General Clause 52.227-14, Alt. IV (DEC 2007)
#
# (c) 2018 The MITRE Corporation. All Rights Reserved.
#====================================================
# CASE API
#!/usr/bin/env python
import datetime
import uuid
import rdflib
from rdflib import RDF
CASE = rdflib.Namespace('http://case.example.org/core#')
#====================================================
#-- CREATE A CASE DOCUMENT FOR A SINGLE REPORT
class Document(object):
def __init__(self, graph=None):
"""
Initializes the CASE document.
Args:
graph: The graph to populate (instance of rdflib.Graph)
If not provided, a graph in memory will be used.
"""
if not graph:
graph = rdflib.Graph()
graph.namespace_manager.bind('case', CASE)
self.graph = graph
def _sanitize_triple(self, triple):
"""Santizes the triple to contains pure rdflib terms."""
s, p, o = triple
if isinstance(s, Node):
s = s._node
if isinstance(o, Node):
o = o._node
elif o is not None and not isinstance(o, rdflib.term.Node):
o = rdflib.Literal(o)
if p is not None and not isinstance(p, rdflib.term.Node):
p = CASE[p]
return s, p, o
def __iter__(self):
"""Wrapper for iterating over all triples in the graph"""
return iter(self.graph)
def __contains__(self, triple):
"""Wrapper for checking if triple is contained in the graph."""
return self._sanitize_triple(triple) in self.graph
def triples(self, triple):
"""Generator over the triple store in graph."""
return self.graph.triples(self._sanitize_triple(triple))
def _json_ld_context(self):
context = dict(
(pfx, str(ns))
for (pfx, ns) in self.graph.namespaces() if pfx and
str(ns) != u"http://www.w3.org/XML/1998/namespace")
context['@vocab'] = str(CASE)
return context
    # Manually specify properties to help enforce that both properties are supplied.
def create_hash(self, hashMethod, hashValue):
return self.create_Node(
CASE.Hash, bnode=True, hashMethod=hashMethod, hashValue=hashValue)
# We are going to default to json-ld instead of rdflib's default of xml.
def serialize(self, format='json-ld', **kwargs):
"""Serializes the document's graph to a destination.
(Follows same arguments as rdflib.Graph().serialize())"""
if format == 'json-ld':
if 'context' not in kwargs:
kwargs['context'] = self._json_ld_context()
if 'auto_compact' not in kwargs:
kwargs['auto_compact'] = True
return self.graph.serialize(format=format, **kwargs)
# def serialize_append(self, format='json-ld', destination="new-api_output.json", **kwargs):
# """
# Serializes the document's graph to append to a destination file.
# """
# if format == 'json-ld':
# if 'context' not in kwargs:
# kwargs['context'] = self._json_ld_context()
# if 'auto_compact' not in kwargs:
# kwargs['auto_compact'] = True
# graph = self.graph.serialize(format=format, **kwargs)
# with open(destination, "a") as fin:
# fin.write(graph)
# fin.close()
#====================================================
#-- CREATE A CASE OBJECT
def create_Node(self, rdf_type=None, uri=None, bnode=False, **kwargs):
return Node(self.graph, rdf_type=rdf_type, uri=uri, bnode=bnode, **kwargs)
def create_CoreObject(self, _type=None, **kwargs):
"""
Creates and returns a CoreObject.
"""
return CoreObject(self.graph, rdf_type=_type, **kwargs)
def create_ContextObject(self, _type=None, **kwargs):
"""
Creates and returns a Context.
This class may not have PropertyBundles.
"""
return ContextObject(self.graph, rdf_type=_type, **kwargs)
def create_SubObject(self, _type=None, **kwargs):
"""
Creates and returns a Sub.
This class is for children of one of the above CASE classes.
This class may not have PropertyBundles.
"""
return SubObject(self.graph, rdf_type=_type, **kwargs)
def create_DuckObject(self, _type=None, **kwargs):
"""
Creates and returns a Duck.
These lonely Ducks have no parents and are fully duck-typed.
This class may not have PropertyBundles.
"""
return DuckObject(self.graph, rdf_type=_type, **kwargs)
#====================================================
#-- CASE OBJECT CLASSES
class Node(object):
"""Implements a generic node in the graph."""
RDF_TYPE = None
# Namespace to use when adding properties that are not of type rdflib.URIRef.
NAMESPACE = CASE
def __init__(self, graph, uri=None, bnode=False, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
            uri: Optional string to set the URI to. (If not provided, a UUID will be generated.)
bnode: Whether to create a blank node or a uri reference.
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
super(Node, self).__init__()
if uri:
self.uri = uri
else:
self.uri = str(uuid.uuid4())
if bnode:
self._node = rdflib.BNode(self.uri)
else:
self._node = rdflib.URIRef(self.uri)
self._graph = graph
if not rdf_type:
rdf_type = self.RDF_TYPE
# Add namespace prefix to non URIRef to allow abstraction from rdflib.
if not isinstance(rdf_type, rdflib.term.Node):
rdf_type = self.NAMESPACE[rdf_type]
self.add(RDF.type, rdf_type)
for key, value in iter(kwargs.items()):
self.add(key, value)
def add(self, property, value):
"""Adds a property and its value to the node."""
# type: (object, object) -> object
# Ignore setting properties with a None value.
if value is None:
return
# Lists and other iterables as values are the equivalent of having multiple properties.
# NOTE: Lists obviously lose their order.
# TODO: Add support for ordered lists.
if isinstance(value, (list, tuple, set)):
for item in value:
self.add(property, item)
return
if isinstance(value, Node):
value = value._node
# Convert basic python datatypes to literals.
elif not isinstance(value, rdflib.term.Node):
value = rdflib.Literal(value)
# Automatically convert non-node properties to URIRef using default prefix.
if not isinstance(property, rdflib.term.Node):
property = self.NAMESPACE[property]
self._graph.add((self._node, property, value))
class CoreObject(Node):
RDF_TYPE = CASE.CoreObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(CoreObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('CoreObjectCreationTime', datetime.datetime.utcnow())
self.pb = ""
def create_PropertyBundle(self, prop_type=None, **kwargs):
"""Convenience function for adding property bundles to this Trace.
Args:
type: The @type of property bundle (can be of type rdflib.URIRef or string).
properties: Properties to add to the created property bundle.
Returns:
The property bundle created (instance of PropertyBundle).
"""
self.pb = PropertyBundle(self._graph, rdf_type=prop_type, **kwargs)
self.add(CASE.propertyBundle, self.pb)
return self.pb
class PropertyBundle(Node):
RDF_TYPE = CASE.PropertyBundle
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
# Property bundles should be blank nodes because we should be referencing them
# through CoreObjects.
self.propObj = kwargs
super(PropertyBundle, self).__init__(
graph, bnode=True, rdf_type=rdf_type, **kwargs)
class ContextObject(Node):
RDF_TYPE = CASE.ContextObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(ContextObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('ContextObjectCreationTime', datetime.datetime.utcnow())
class SubObject(Node):
RDF_TYPE = CASE.SubObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(SubObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('SubObjectCreationTime', datetime.datetime.utcnow())
class DuckObject(Node):
RDF_TYPE = CASE.DuckObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(DuckObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('DuckObjectCreationTime', datetime.datetime.utcnow())
|
create_PropertyBundle
|
Convenience function for adding property bundles to this Trace.
Args:
type: The @type of property bundle (can be of type rdflib.URIRef or string).
properties: Properties to add to the created property bundle.
Returns:
The property bundle created (instance of PropertyBundle).
|
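A minimal usage sketch for the method documented above, assuming the module listed below is saved as case_api.py; the type name 'Trace' and the properties fileName/sizeInBytes are illustrative, not taken from the code or from any ontology.
from case_api import Document, CASE  # assumed module name for the listing below
doc = Document()
trace = doc.create_CoreObject('Trace')  # typed CASE.Trace and stamped with CoreObjectCreationTime
# String keys resolve against the CASE namespace; plain Python values become rdflib Literals.
pb = trace.create_PropertyBundle('File', fileName='report.pdf', sizeInBytes=1024)
# The bundle is a blank node linked from the trace via CASE.propertyBundle.
assert (trace, CASE.propertyBundle, pb) in doc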
# NOTICE
#
# This software was produced for the U.S. Government under
# contract SB-1341-14-CQ-0010, and is subject to the Rights
# in Data-General Clause 52.227-14, Alt. IV (DEC 2007)
#
# (c) 2018 The MITRE Corporation. All Rights Reserved.
#====================================================
# CASE API
#!/usr/bin/env python
import datetime
import uuid
import rdflib
from rdflib import RDF
CASE = rdflib.Namespace('http://case.example.org/core#')
#====================================================
#-- CREATE A CASE DOCUMENT FOR A SINGLE REPORT
class Document(object):
def __init__(self, graph=None):
"""
Initializes the CASE document.
Args:
graph: The graph to populate (instance of rdflib.Graph)
If not provided, a graph in memory will be used.
"""
if not graph:
graph = rdflib.Graph()
graph.namespace_manager.bind('case', CASE)
self.graph = graph
def _sanitize_triple(self, triple):
"""Santizes the triple to contains pure rdflib terms."""
s, p, o = triple
if isinstance(s, Node):
s = s._node
if isinstance(o, Node):
o = o._node
elif o is not None and not isinstance(o, rdflib.term.Node):
o = rdflib.Literal(o)
if p is not None and not isinstance(p, rdflib.term.Node):
p = CASE[p]
return s, p, o
def __iter__(self):
"""Wrapper for iterating over all triples in the graph"""
return iter(self.graph)
def __contains__(self, triple):
"""Wrapper for checking if triple is contained in the graph."""
return self._sanitize_triple(triple) in self.graph
def triples(self, triple):
"""Generator over the triple store in graph."""
return self.graph.triples(self._sanitize_triple(triple))
def _json_ld_context(self):
context = dict(
(pfx, str(ns))
for (pfx, ns) in self.graph.namespaces() if pfx and
str(ns) != u"http://www.w3.org/XML/1998/namespace")
context['@vocab'] = str(CASE)
return context
# Manually specify properties to help enforce that both properties are supplied.
def create_hash(self, hashMethod, hashValue):
return self.create_Node(
CASE.Hash, bnode=True, hashMethod=hashMethod, hashValue=hashValue)
# We are going to default to json-ld instead of rdflib's default of xml.
def serialize(self, format='json-ld', **kwargs):
"""Serializes the document's graph to a destination.
(Follows same arguments as rdflib.Graph().serialize())"""
if format == 'json-ld':
if 'context' not in kwargs:
kwargs['context'] = self._json_ld_context()
if 'auto_compact' not in kwargs:
kwargs['auto_compact'] = True
return self.graph.serialize(format=format, **kwargs)
# def serialize_append(self, format='json-ld', destination="new-api_output.json", **kwargs):
# """
# Serializes the document's graph to append to a destination file.
# """
# if format == 'json-ld':
# if 'context' not in kwargs:
# kwargs['context'] = self._json_ld_context()
# if 'auto_compact' not in kwargs:
# kwargs['auto_compact'] = True
# graph = self.graph.serialize(format=format, **kwargs)
# with open(destination, "a") as fin:
# fin.write(graph)
# fin.close()
#====================================================
#-- CREATE A CASE OBJECT
def create_Node(self, rdf_type=None, uri=None, bnode=False, **kwargs):
return Node(self.graph, rdf_type=rdf_type, uri=uri, bnode=bnode, **kwargs)
def create_CoreObject(self, _type=None, **kwargs):
"""
Creates and returns a CoreObject.
"""
return CoreObject(self.graph, rdf_type=_type, **kwargs)
def create_ContextObject(self, _type=None, **kwargs):
"""
Creates and returns a Context.
This class may not have PropertyBundles.
"""
return ContextObject(self.graph, rdf_type=_type, **kwargs)
def create_SubObject(self, _type=None, **kwargs):
"""
Creates and returns a Sub.
This class is for children of one of the above CASE classes.
This class may not have PropertyBundles.
"""
return SubObject(self.graph, rdf_type=_type, **kwargs)
def create_DuckObject(self, _type=None, **kwargs):
"""
Creates and returns a Duck.
These lonely Ducks have no parents and are fully duck-typed.
This class may not have PropertyBundles.
"""
return DuckObject(self.graph, rdf_type=_type, **kwargs)
#====================================================
#-- CASE OBJECT CLASSES
class Node(object):
"""Implements a generic node in the graph."""
RDF_TYPE = None
# Namespace to use when adding properties that are not of type rdflib.URIRef.
NAMESPACE = CASE
def __init__(self, graph, uri=None, bnode=False, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
uri: Optional string to set the URI to. (If not provided, a UUID will be generated.)
bnode: Whether to create a blank node or a URI reference.
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
super(Node, self).__init__()
if uri:
self.uri = uri
else:
self.uri = str(uuid.uuid4())
if bnode:
self._node = rdflib.BNode(self.uri)
else:
self._node = rdflib.URIRef(self.uri)
self._graph = graph
if not rdf_type:
rdf_type = self.RDF_TYPE
# Add namespace prefix to non URIRef to allow abstraction from rdflib.
if not isinstance(rdf_type, rdflib.term.Node):
rdf_type = self.NAMESPACE[rdf_type]
self.add(RDF.type, rdf_type)
for key, value in iter(kwargs.items()):
self.add(key, value)
def add(self, property, value):
"""Adds a property and its value to the node."""
# type: (object, object) -> object
# Ignore setting properties with a None value.
if value is None:
return
# Lists and other iterables as values are the equivalent of having multiple properties.
# NOTE: Lists obviously lose their order.
# TODO: Add support for ordered lists.
if isinstance(value, (list, tuple, set)):
for item in value:
self.add(property, item)
return
if isinstance(value, Node):
value = value._node
# Convert basic python datatypes to literals.
elif not isinstance(value, rdflib.term.Node):
value = rdflib.Literal(value)
# Automatically convert non-node properties to URIRef using default prefix.
if not isinstance(property, rdflib.term.Node):
property = self.NAMESPACE[property]
self._graph.add((self._node, property, value))
class CoreObject(Node):
RDF_TYPE = CASE.CoreObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(CoreObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('CoreObjectCreationTime', datetime.datetime.utcnow())
self.pb = ""
# MASKED: create_PropertyBundle function (lines 249-263)
class PropertyBundle(Node):
RDF_TYPE = CASE.PropertyBundle
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
# Property bundles should be blank nodes because we should be referencing them
# through CoreObjects.
self.propObj = kwargs
super(PropertyBundle, self).__init__(
graph, bnode=True, rdf_type=rdf_type, **kwargs)
class ContextObject(Node):
RDF_TYPE = CASE.ContextObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(ContextObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('ContextObjectCreationTime', datetime.datetime.utcnow())
class SubObject(Node):
RDF_TYPE = CASE.SubObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(SubObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('SubObjectCreationTime', datetime.datetime.utcnow())
class DuckObject(Node):
RDF_TYPE = CASE.DuckObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(DuckObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('DuckObjectCreationTime', datetime.datetime.utcnow())
|
def create_PropertyBundle(self, prop_type=None, **kwargs):
"""Convenience function for adding property bundles to this Trace.
Args:
type: The @type of property bundle (can be of type rdflib.URIRef or string).
properties: Properties to add to the created property bundle.
Returns:
The property bundle created (instance of PropertyBundle).
"""
self.pb = PropertyBundle(self._graph, rdf_type=prop_type, **kwargs)
self.add(CASE.propertyBundle, self.pb)
return self.pb
| 249 | 263 |
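Building on the implementation above, a hedged end-to-end sketch of a Document round trip; the module name case_api, the names 'Trace', fileName and 'hash', and the digest string are assumptions for illustration, and JSON-LD output relies on rdflib's JSON-LD serializer (bundled since rdflib 6, otherwise provided by the rdflib-jsonld plugin).
from case_api import Document, CASE  # assumed module name
doc = Document()                                # in-memory graph bound to the 'case' prefix
trace = doc.create_CoreObject('Trace')
trace.create_PropertyBundle('File', fileName='report.pdf')
# create_hash takes both arguments explicitly so neither can be silently omitted.
digest = doc.create_hash('SHA256', 'aabbccdd')  # placeholder digest, illustrative only
trace.add('hash', digest)                       # string property resolves to CASE['hash']
for s, p, o in doc.triples((None, CASE.propertyBundle, None)):
    print(s, p, o)                              # one row per attached property bundle
print(doc.serialize())                          # JSON-LD with the namespace context by default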
# NOTICE
#
# This software was produced for the U.S. Government under
# contract SB-1341-14-CQ-0010, and is subject to the Rights
# in Data-General Clause 52.227-14, Alt. IV (DEC 2007)
#
# (c) 2018 The MITRE Corporation. All Rights Reserved.
#====================================================
# CASE API
#!/usr/bin/env python
import datetime
import uuid
import rdflib
from rdflib import RDF
CASE = rdflib.Namespace('http://case.example.org/core#')
#====================================================
#-- CREATE A CASE DOCUMENT FOR A SINGLE REPORT
class Document(object):
def __init__(self, graph=None):
"""
Initializes the CASE document.
Args:
graph: The graph to populate (instance of rdflib.Graph)
If not provided, a graph in memory will be used.
"""
if not graph:
graph = rdflib.Graph()
graph.namespace_manager.bind('case', CASE)
self.graph = graph
def _sanitize_triple(self, triple):
"""Santizes the triple to contains pure rdflib terms."""
s, p, o = triple
if isinstance(s, Node):
s = s._node
if isinstance(o, Node):
o = o._node
elif o is not None and not isinstance(o, rdflib.term.Node):
o = rdflib.Literal(o)
if p is not None and not isinstance(p, rdflib.term.Node):
p = CASE[p]
return s, p, o
def __iter__(self):
"""Wrapper for iterating over all triples in the graph"""
return iter(self.graph)
def __contains__(self, triple):
"""Wrapper for checking if triple is contained in the graph."""
return self._sanitize_triple(triple) in self.graph
def triples(self, triple):
"""Generator over the triple store in graph."""
return self.graph.triples(self._sanitize_triple(triple))
def _json_ld_context(self):
context = dict(
(pfx, str(ns))
for (pfx, ns) in self.graph.namespaces() if pfx and
str(ns) != u"http://www.w3.org/XML/1998/namespace")
context['@vocab'] = str(CASE)
return context
# Manually specify properties to help enforce that both properties are supplied.
def create_hash(self, hashMethod, hashValue):
return self.create_Node(
CASE.Hash, bnode=True, hashMethod=hashMethod, hashValue=hashValue)
# We are going to default to json-ld instead of rdflib's default of xml.
def serialize(self, format='json-ld', **kwargs):
"""Serializes the document's graph to a destination.
(Follows same arguments as rdflib.Graph().serialize())"""
if format == 'json-ld':
if 'context' not in kwargs:
kwargs['context'] = self._json_ld_context()
if 'auto_compact' not in kwargs:
kwargs['auto_compact'] = True
return self.graph.serialize(format=format, **kwargs)
# def serialize_append(self, format='json-ld', destination="new-api_output.json", **kwargs):
# """
# Serializes the document's graph to append to a destination file.
# """
# if format == 'json-ld':
# if 'context' not in kwargs:
# kwargs['context'] = self._json_ld_context()
# if 'auto_compact' not in kwargs:
# kwargs['auto_compact'] = True
# graph = self.graph.serialize(format=format, **kwargs)
# with open(destination, "a") as fin:
# fin.write(graph)
# fin.close()
#====================================================
#-- CREATE A CASE OBJECT
def create_Node(self, rdf_type=None, uri=None, bnode=False, **kwargs):
return Node(self.graph, rdf_type=rdf_type, uri=uri, bnode=bnode, **kwargs)
def create_CoreObject(self, _type=None, **kwargs):
"""
Creates and returns a CoreObject.
"""
return CoreObject(self.graph, rdf_type=_type, **kwargs)
def create_ContextObject(self, _type=None, **kwargs):
"""
Creates and returns a Context.
This class may not have PropertyBundles.
"""
return ContextObject(self.graph, rdf_type=_type, **kwargs)
def create_SubObject(self, _type=None, **kwargs):
"""
Creates and returns a Sub.
This class is for children of one of the above CASE classes.
This class may not have PropertyBundles.
"""
return SubObject(self.graph, rdf_type=_type, **kwargs)
def create_DuckObject(self, _type=None, **kwargs):
"""
Creates and returns a Duck.
These lonely Ducks have no parents and are fully duck-typed.
This class may not have PropertyBundles.
"""
return DuckObject(self.graph, rdf_type=_type, **kwargs)
#====================================================
#-- CASE OBJECT CLASSES
class Node(object):
"""Implements a generic node in the graph."""
RDF_TYPE = None
# Namespace to use when adding properties that are not of type rdflib.URIRef.
NAMESPACE = CASE
def __init__(self, graph, uri=None, bnode=False, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
uri: Optional string to set the URI to. (If not provided, a UUID will be generated.)
bnode: Whether to create a blank node or a URI reference.
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
super(Node, self).__init__()
if uri:
self.uri = uri
else:
self.uri = str(uuid.uuid4())
if bnode:
self._node = rdflib.BNode(self.uri)
else:
self._node = rdflib.URIRef(self.uri)
self._graph = graph
if not rdf_type:
rdf_type = self.RDF_TYPE
# Add namespace prefix to non URIRef to allow abstraction from rdflib.
if not isinstance(rdf_type, rdflib.term.Node):
rdf_type = self.NAMESPACE[rdf_type]
self.add(RDF.type, rdf_type)
for key, value in iter(kwargs.items()):
self.add(key, value)
def add(self, property, value):
"""Adds a property and its value to the node."""
# type: (object, object) -> object
# Ignore setting properties with a None value.
if value is None:
return
# Lists and other iterables as values are the equivalent of having multiple properties.
# NOTE: Lists obviously lose their order.
# TODO: Add support for ordered lists.
if isinstance(value, (list, tuple, set)):
for item in value:
self.add(property, item)
return
if isinstance(value, Node):
value = value._node
# Convert basic python datatypes to literals.
elif not isinstance(value, rdflib.term.Node):
value = rdflib.Literal(value)
# Automatically convert non-node properties to URIRef using default prefix.
if not isinstance(property, rdflib.term.Node):
property = self.NAMESPACE[property]
self._graph.add((self._node, property, value))
class CoreObject(Node):
RDF_TYPE = CASE.CoreObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(CoreObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('CoreObjectCreationTime', datetime.datetime.utcnow())
self.pb = ""
def create_PropertyBundle(self, prop_type=None, **kwargs):
"""Convenience function for adding property bundles to this Trace.
Args:
type: The @type of property bundle (can be of type rdflib.URIRef or string).
properties: Properties to add to the created property bundle.
Returns:
The property bundle created (instance of PropertyBundle).
"""
self.pb = PropertyBundle(self._graph, rdf_type=prop_type, **kwargs)
self.add(CASE.propertyBundle, self.pb)
return self.pb
class PropertyBundle(Node):
RDF_TYPE = CASE.PropertyBundle
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
# Property bundles should be blank nodes because we should be referencing them
# through CoreObjects.
self.propObj = kwargs
super(PropertyBundle, self).__init__(
graph, bnode=True, rdf_type=rdf_type, **kwargs)
class ContextObject(Node):
RDF_TYPE = CASE.ContextObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(ContextObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('ContextObjectCreationTime', datetime.datetime.utcnow())
class SubObject(Node):
RDF_TYPE = CASE.SubObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(SubObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('SubObjectCreationTime', datetime.datetime.utcnow())
class DuckObject(Node):
RDF_TYPE = CASE.DuckObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(DuckObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('DuckObjectCreationTime', datetime.datetime.utcnow())
|
__init__
|
Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
|
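As a hedged illustration of where the keyword properties described above end up: every kwarg is routed through Node.add (see the listing), which drops None values, fans iterables out into one triple per item, links Node values by their underlying term, and wraps anything else in an rdflib Literal. The module name and the property names are assumed for the example.
from case_api import Document, CASE  # assumed module name
doc = Document()
note = doc.create_Node('Annotation')      # plain node typed CASE.Annotation
other = doc.create_Node('Annotation')
note.add('tag', ['red', 'blue'])          # two CASE.tag triples; list order is not preserved
note.add('relatedTo', other)              # object is the other node's URIRef
note.add('count', 3)                      # stored as rdflib.Literal(3)
note.add('comment', None)                 # silently ignored
assert len(list(doc.triples((note, CASE.tag, None)))) == 2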
# NOTICE
#
# This software was produced for the U.S. Government under
# contract SB-1341-14-CQ-0010, and is subject to the Rights
# in Data-General Clause 52.227-14, Alt. IV (DEC 2007)
#
# (c) 2018 The MITRE Corporation. All Rights Reserved.
#====================================================
# CASE API
#!/usr/bin/env python
import datetime
import uuid
import rdflib
from rdflib import RDF
CASE = rdflib.Namespace('http://case.example.org/core#')
#====================================================
#-- CREATE A CASE DOCUMENT FOR A SINGLE REPORT
class Document(object):
def __init__(self, graph=None):
"""
Initializes the CASE document.
Args:
graph: The graph to populate (instance of rdflib.Graph)
If not provided, a graph in memory will be used.
"""
if not graph:
graph = rdflib.Graph()
graph.namespace_manager.bind('case', CASE)
self.graph = graph
def _sanitize_triple(self, triple):
"""Santizes the triple to contains pure rdflib terms."""
s, p, o = triple
if isinstance(s, Node):
s = s._node
if isinstance(o, Node):
o = o._node
elif o is not None and not isinstance(o, rdflib.term.Node):
o = rdflib.Literal(o)
if p is not None and not isinstance(p, rdflib.term.Node):
p = CASE[p]
return s, p, o
def __iter__(self):
"""Wrapper for iterating over all triples in the graph"""
return iter(self.graph)
def __contains__(self, triple):
"""Wrapper for checking if triple is contained in the graph."""
return self._sanitize_triple(triple) in self.graph
def triples(self, triple):
"""Generator over the triple store in graph."""
return self.graph.triples(self._sanitize_triple(triple))
def _json_ld_context(self):
context = dict(
(pfx, str(ns))
for (pfx, ns) in self.graph.namespaces() if pfx and
str(ns) != u"http://www.w3.org/XML/1998/namespace")
context['@vocab'] = str(CASE)
return context
# Manually specify properties to help enforce that both properties are supplied.
def create_hash(self, hashMethod, hashValue):
return self.create_Node(
CASE.Hash, bnode=True, hashMethod=hashMethod, hashValue=hashValue)
# We are going to default to json-ld instead of rdflib's default of xml.
def serialize(self, format='json-ld', **kwargs):
"""Serializes the document's graph to a destination.
(Follows same arguments as rdflib.Graph().serialize())"""
if format == 'json-ld':
if 'context' not in kwargs:
kwargs['context'] = self._json_ld_context()
if 'auto_compact' not in kwargs:
kwargs['auto_compact'] = True
return self.graph.serialize(format=format, **kwargs)
# def serialize_append(self, format='json-ld', destination="new-api_output.json", **kwargs):
# """
# Serializes the document's graph to append to a destination file.
# """
# if format == 'json-ld':
# if 'context' not in kwargs:
# kwargs['context'] = self._json_ld_context()
# if 'auto_compact' not in kwargs:
# kwargs['auto_compact'] = True
# graph = self.graph.serialize(format=format, **kwargs)
# with open(destination, "a") as fin:
# fin.write(graph)
# fin.close()
#====================================================
#-- CREATE A CASE OBJECT
def create_Node(self, rdf_type=None, uri=None, bnode=False, **kwargs):
return Node(self.graph, rdf_type=rdf_type, uri=uri, bnode=bnode, **kwargs)
def create_CoreObject(self, _type=None, **kwargs):
"""
Creates and returns a CoreObject.
"""
return CoreObject(self.graph, rdf_type=_type, **kwargs)
def create_ContextObject(self, _type=None, **kwargs):
"""
Creates and returns a Context.
This class may not have PropertyBundles.
"""
return ContextObject(self.graph, rdf_type=_type, **kwargs)
def create_SubObject(self, _type=None, **kwargs):
"""
Creates and returns a Sub.
This class is for children of one of the above CASE classes.
This class may not have PropertyBundles.
"""
return SubObject(self.graph, rdf_type=_type, **kwargs)
def create_DuckObject(self, _type=None, **kwargs):
"""
Creates and returns a Duck.
These lonely Ducks have no parents and are fully duck-typed.
This class may not have PropertyBundles.
"""
return DuckObject(self.graph, rdf_type=_type, **kwargs)
#====================================================
#-- CASE OBJECT CLASSES
class Node(object):
"""Implements a generic node in the graph."""
RDF_TYPE = None
# Namespace to use when adding properties that are not of type rdflib.URIRef.
NAMESPACE = CASE
def __init__(self, graph, uri=None, bnode=False, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
uri: Optional string to set the URI to. (If not provided, a UUID will be generated.)
bnode: Whether to create a blank node or a URI reference.
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
super(Node, self).__init__()
if uri:
self.uri = uri
else:
self.uri = str(uuid.uuid4())
if bnode:
self._node = rdflib.BNode(self.uri)
else:
self._node = rdflib.URIRef(self.uri)
self._graph = graph
if not rdf_type:
rdf_type = self.RDF_TYPE
# Add namespace prefix to non URIRef to allow abstraction from rdflib.
if not isinstance(rdf_type, rdflib.term.Node):
rdf_type = self.NAMESPACE[rdf_type]
self.add(RDF.type, rdf_type)
for key, value in iter(kwargs.items()):
self.add(key, value)
def add(self, property, value):
"""Adds a property and its value to the node."""
# type: (object, object) -> object
# Ignore setting properties with a None value.
if value is None:
return
# Lists and other iterables as values are the equivalent of having multiple properties.
# NOTE: Lists obviously lose their order.
# TODO: Add support for ordered lists.
if isinstance(value, (list, tuple, set)):
for item in value:
self.add(property, item)
return
if isinstance(value, Node):
value = value._node
# Convert basic python datatypes to literals.
elif not isinstance(value, rdflib.term.Node):
value = rdflib.Literal(value)
# Automatically convert non-node properties to URIRef using default prefix.
if not isinstance(property, rdflib.term.Node):
property = self.NAMESPACE[property]
self._graph.add((self._node, property, value))
class CoreObject(Node):
RDF_TYPE = CASE.CoreObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(CoreObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('CoreObjectCreationTime', datetime.datetime.utcnow())
self.pb = ""
def create_PropertyBundle(self, prop_type=None, **kwargs):
"""Convenience function for adding property bundles to this Trace.
Args:
type: The @type of property bundle (can be of type rdflib.URIRef or string).
properties: Properties to add to the created property bundle.
Returns:
The property bundle created (instance of PropertyBundle).
"""
self.pb = PropertyBundle(self._graph, rdf_type=prop_type, **kwargs)
self.add(CASE.propertyBundle, self.pb)
return self.pb
class PropertyBundle(Node):
RDF_TYPE = CASE.PropertyBundle
# MASKED: __init__ function (lines 270-289)
class ContextObject(Node):
RDF_TYPE = CASE.ContextObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(ContextObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('ContextObjectCreationTime', datetime.datetime.utcnow())
class SubObject(Node):
RDF_TYPE = CASE.SubObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(SubObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('SubObjectCreationTime', datetime.datetime.utcnow())
class DuckObject(Node):
RDF_TYPE = CASE.DuckObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(DuckObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('DuckObjectCreationTime', datetime.datetime.utcnow())
|
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
# Property bundles should be blank nodes because we should be referencing them
# through CoreObjects.
self.propObj = kwargs
super(PropertyBundle, self).__init__(
graph, bnode=True, rdf_type=rdf_type, **kwargs)
| 270 | 289 |
# NOTICE
#
# This software was produced for the U.S. Government under
# contract SB-1341-14-CQ-0010, and is subject to the Rights
# in Data-General Clause 52.227-14, Alt. IV (DEC 2007)
#
# (c) 2018 The MITRE Corporation. All Rights Reserved.
#====================================================
# CASE API
#!/usr/bin/env python
import datetime
import uuid
import rdflib
from rdflib import RDF
CASE = rdflib.Namespace('http://case.example.org/core#')
#====================================================
#-- CREATE A CASE DOCUMENT FOR A SINGLE REPORT
class Document(object):
def __init__(self, graph=None):
"""
Initializes the CASE document.
Args:
graph: The graph to populate (instance of rdflib.Graph)
If not provided, a graph in memory will be used.
"""
if not graph:
graph = rdflib.Graph()
graph.namespace_manager.bind('case', CASE)
self.graph = graph
def _sanitize_triple(self, triple):
"""Santizes the triple to contains pure rdflib terms."""
s, p, o = triple
if isinstance(s, Node):
s = s._node
if isinstance(o, Node):
o = o._node
elif o is not None and not isinstance(o, rdflib.term.Node):
o = rdflib.Literal(o)
if p is not None and not isinstance(p, rdflib.term.Node):
p = CASE[p]
return s, p, o
def __iter__(self):
"""Wrapper for iterating over all triples in the graph"""
return iter(self.graph)
def __contains__(self, triple):
"""Wrapper for checking if triple is contained in the graph."""
return self._sanitize_triple(triple) in self.graph
def triples(self, triple):
"""Generator over the triple store in graph."""
return self.graph.triples(self._sanitize_triple(triple))
def _json_ld_context(self):
context = dict(
(pfx, str(ns))
for (pfx, ns) in self.graph.namespaces() if pfx and
str(ns) != u"http://www.w3.org/XML/1998/namespace")
context['@vocab'] = str(CASE)
return context
# Manually specify properties to help enforce that both properties are supplied.
def create_hash(self, hashMethod, hashValue):
return self.create_Node(
CASE.Hash, bnode=True, hashMethod=hashMethod, hashValue=hashValue)
# We are going to default to json-ld instead of rdflib's default of xml.
def serialize(self, format='json-ld', **kwargs):
"""Serializes the document's graph to a destination.
(Follows same arguments as rdflib.Graph().serialize())"""
if format == 'json-ld':
if 'context' not in kwargs:
kwargs['context'] = self._json_ld_context()
if 'auto_compact' not in kwargs:
kwargs['auto_compact'] = True
return self.graph.serialize(format=format, **kwargs)
# def serialize_append(self, format='json-ld', destination="new-api_output.json", **kwargs):
# """
# Serializes the document's graph to append to a destination file.
# """
# if format == 'json-ld':
# if 'context' not in kwargs:
# kwargs['context'] = self._json_ld_context()
# if 'auto_compact' not in kwargs:
# kwargs['auto_compact'] = True
# graph = self.graph.serialize(format=format, **kwargs)
# with open(destination, "a") as fin:
# fin.write(graph)
# fin.close()
#====================================================
#-- CREATE A CASE OBJECT
def create_Node(self, rdf_type=None, uri=None, bnode=False, **kwargs):
return Node(self.graph, rdf_type=rdf_type, uri=uri, bnode=bnode, **kwargs)
def create_CoreObject(self, _type=None, **kwargs):
"""
Creates and returns a CoreObject.
"""
return CoreObject(self.graph, rdf_type=_type, **kwargs)
def create_ContextObject(self, _type=None, **kwargs):
"""
Creates and returns a Context.
This class may not have PropertyBundles.
"""
return ContextObject(self.graph, rdf_type=_type, **kwargs)
def create_SubObject(self, _type=None, **kwargs):
"""
Creates and returns a Sub.
This class is for children of one of the above CASE classes.
This class may not have PropertyBundles.
"""
return SubObject(self.graph, rdf_type=_type, **kwargs)
def create_DuckObject(self, _type=None, **kwargs):
"""
Creates and returns a Duck.
These lonely Ducks have no parents and are fully duck-typed.
This class may not have PropertyBundles.
"""
return DuckObject(self.graph, rdf_type=_type, **kwargs)
#====================================================
#-- CASE OBJECT CLASSES
class Node(object):
"""Implements a generic node in the graph."""
RDF_TYPE = None
# Namespace to use when adding properties that are not of type rdflib.URIRef.
NAMESPACE = CASE
def __init__(self, graph, uri=None, bnode=False, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
uri: Optional string to set the URI to. (If not provided, a UUID will be generated.)
bnode: Whether to create a blank node or a URI reference.
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
super(Node, self).__init__()
if uri:
self.uri = uri
else:
self.uri = str(uuid.uuid4())
if bnode:
self._node = rdflib.BNode(self.uri)
else:
self._node = rdflib.URIRef(self.uri)
self._graph = graph
if not rdf_type:
rdf_type = self.RDF_TYPE
# Add namespace prefix to non URIRef to allow abstraction from rdflib.
if not isinstance(rdf_type, rdflib.term.Node):
rdf_type = self.NAMESPACE[rdf_type]
self.add(RDF.type, rdf_type)
for key, value in iter(kwargs.items()):
self.add(key, value)
def add(self, property, value):
"""Adds a property and its value to the node."""
# type: (object, object) -> object
# Ignore setting properties with a None value.
if value is None:
return
# Lists and other iterables as values are the equivalent of having multiple properties.
# NOTE: Lists obviously lose their order.
# TODO: Add support for ordered lists.
if isinstance(value, (list, tuple, set)):
for item in value:
self.add(property, item)
return
if isinstance(value, Node):
value = value._node
# Convert basic python datatypes to literals.
elif not isinstance(value, rdflib.term.Node):
value = rdflib.Literal(value)
# Automatically convert non-node properties to URIRef using default prefix.
if not isinstance(property, rdflib.term.Node):
property = self.NAMESPACE[property]
self._graph.add((self._node, property, value))
class CoreObject(Node):
RDF_TYPE = CASE.CoreObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(CoreObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('CoreObjectCreationTime', datetime.datetime.utcnow())
self.pb = ""
def create_PropertyBundle(self, prop_type=None, **kwargs):
"""Convenience function for adding property bundles to this Trace.
Args:
type: The @type of property bundle (can be of type rdflib.URIRef or string).
properties: Properties to add to the created property bundle.
Returns:
The property bundle created (instance of PropertyBundle).
"""
self.pb = PropertyBundle(self._graph, rdf_type=prop_type, **kwargs)
self.add(CASE.propertyBundle, self.pb)
return self.pb
class PropertyBundle(Node):
RDF_TYPE = CASE.PropertyBundle
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
# Property bundles should be blank nodes because we should be referencing them
# through CoreObjects.
self.propObj = kwargs
super(PropertyBundle, self).__init__(
graph, bnode=True, rdf_type=rdf_type, **kwargs)
class ContextObject(Node):
RDF_TYPE = CASE.ContextObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(ContextObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('ContextObjectCreationTime', datetime.datetime.utcnow())
class SubObject(Node):
RDF_TYPE = CASE.SubObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(SubObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('SubObjectCreationTime', datetime.datetime.utcnow())
class DuckObject(Node):
RDF_TYPE = CASE.DuckObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(DuckObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('DuckObjectCreationTime', datetime.datetime.utcnow())
|
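One detail worth calling out from the Document constructor repeated above: it accepts an existing rdflib.Graph and only binds the 'case' prefix to it, so new CASE objects can be written into a graph created elsewhere. Note that the `if not graph` test relies on truthiness, and an empty rdflib.Graph may evaluate as falsy (Graph defines __len__), in which case a fresh graph would quietly be created instead; in practice the passed-in graph should already hold data, as in this hedged sketch (module name and example URI assumed).
import rdflib
from case_api import Document, CASE  # assumed module name
g = rdflib.Graph()                    # stands in for a graph parsed from an earlier report
g.add((rdflib.URIRef('urn:example:prior'), rdflib.RDF.type, CASE.Trace))  # pre-existing data
doc = Document(graph=g)               # nothing is copied; the document writes into g directly
doc.create_CoreObject('Trace')
print(len(g))                         # the shared graph now also holds the new object's triples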
__init__
|
Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
|
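A short sketch of the constructor documented above as reached through Document.create_ContextObject; the type name 'Investigation' and the focus property are illustrative, and the module name is assumed.
from case_api import Document, CASE  # assumed module name
doc = Document()
ctx = doc.create_ContextObject('Investigation', focus='browser history')
# The node is typed CASE.Investigation and stamped with ContextObjectCreationTime.
assert len(list(doc.triples((ctx, CASE.ContextObjectCreationTime, None)))) == 1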
# NOTICE
#
# This software was produced for the U.S. Government under
# contract SB-1341-14-CQ-0010, and is subject to the Rights
# in Data-General Clause 52.227-14, Alt. IV (DEC 2007)
#
# (c) 2018 The MITRE Corporation. All Rights Reserved.
#====================================================
# CASE API
#!/usr/bin/env python
import datetime
import uuid
import rdflib
from rdflib import RDF
CASE = rdflib.Namespace('http://case.example.org/core#')
#====================================================
#-- CREATE A CASE DOCUMENT FOR A SINGLE REPORT
class Document(object):
def __init__(self, graph=None):
"""
Initializes the CASE document.
Args:
graph: The graph to populate (instance of rdflib.Graph)
If not provided, a graph in memory will be used.
"""
if not graph:
graph = rdflib.Graph()
graph.namespace_manager.bind('case', CASE)
self.graph = graph
def _sanitize_triple(self, triple):
"""Santizes the triple to contains pure rdflib terms."""
s, p, o = triple
if isinstance(s, Node):
s = s._node
if isinstance(o, Node):
o = o._node
elif o is not None and not isinstance(o, rdflib.term.Node):
o = rdflib.Literal(o)
if p is not None and not isinstance(p, rdflib.term.Node):
p = CASE[p]
return s, p, o
def __iter__(self):
"""Wrapper for iterating over all triples in the graph"""
return iter(self.graph)
def __contains__(self, triple):
"""Wrapper for checking if triple is contained in the graph."""
return self._sanitize_triple(triple) in self.graph
def triples(self, triple):
"""Generator over the triple store in graph."""
return self.graph.triples(self._sanitize_triple(triple))
def _json_ld_context(self):
context = dict(
(pfx, str(ns))
for (pfx, ns) in self.graph.namespaces() if pfx and
str(ns) != u"http://www.w3.org/XML/1998/namespace")
context['@vocab'] = str(CASE)
return context
# Manually specify properties to help enforce that both properties are supplied.
def create_hash(self, hashMethod, hashValue):
return self.create_Node(
CASE.Hash, bnode=True, hashMethod=hashMethod, hashValue=hashValue)
# We are going to default to json-ld instead of rdflib's default of xml.
def serialize(self, format='json-ld', **kwargs):
"""Serializes the document's graph to a destination.
(Follows same arguments as rdflib.Graph().serialize())"""
if format == 'json-ld':
if 'context' not in kwargs:
kwargs['context'] = self._json_ld_context()
if 'auto_compact' not in kwargs:
kwargs['auto_compact'] = True
return self.graph.serialize(format=format, **kwargs)
# def serialize_append(self, format='json-ld', destination="new-api_output.json", **kwargs):
# """
# Serializes the document's graph to append to a destination file.
# """
# if format == 'json-ld':
# if 'context' not in kwargs:
# kwargs['context'] = self._json_ld_context()
# if 'auto_compact' not in kwargs:
# kwargs['auto_compact'] = True
# graph = self.graph.serialize(format=format, **kwargs)
# with open(destination, "a") as fin:
# fin.write(graph)
# fin.close()
#====================================================
#-- CREATE A CASE OBJECT
def create_Node(self, rdf_type=None, uri=None, bnode=False, **kwargs):
return Node(self.graph, rdf_type=rdf_type, uri=uri, bnode=bnode, **kwargs)
def create_CoreObject(self, _type=None, **kwargs):
"""
Creates and returns a CoreObject.
"""
return CoreObject(self.graph, rdf_type=_type, **kwargs)
def create_ContextObject(self, _type=None, **kwargs):
"""
Creates and returns a Context.
This class may not have PropertyBundles.
"""
return ContextObject(self.graph, rdf_type=_type, **kwargs)
def create_SubObject(self, _type=None, **kwargs):
"""
Creates and returns a Sub.
This class is for children of one of the above CASE classes.
This class may not have PropertyBundles.
"""
return SubObject(self.graph, rdf_type=_type, **kwargs)
def create_DuckObject(self, _type=None, **kwargs):
"""
Creates and returns a Duck.
These lonely Ducks have no parents and are fully duck-typed.
This class may not have PropertyBundles.
"""
return DuckObject(self.graph, rdf_type=_type, **kwargs)
#====================================================
#-- CASE OBJECT CLASSES
class Node(object):
"""Implements a generic node in the graph."""
RDF_TYPE = None
# Namespace to use when adding properties that are not of type rdflib.URIRef.
NAMESPACE = CASE
def __init__(self, graph, uri=None, bnode=False, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
uri: Optional string to set the URI to. (If not provided, a UUID will be generated.)
bnode: Whether to create a blank node or a URI reference.
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
super(Node, self).__init__()
if uri:
self.uri = uri
else:
self.uri = str(uuid.uuid4())
if bnode:
self._node = rdflib.BNode(self.uri)
else:
self._node = rdflib.URIRef(self.uri)
self._graph = graph
if not rdf_type:
rdf_type = self.RDF_TYPE
# Add namespace prefix to non URIRef to allow abstraction from rdflib.
if not isinstance(rdf_type, rdflib.term.Node):
rdf_type = self.NAMESPACE[rdf_type]
self.add(RDF.type, rdf_type)
for key, value in iter(kwargs.items()):
self.add(key, value)
def add(self, property, value):
"""Adds a property and its value to the node."""
# type: (object, object) -> object
# Ignore setting properties with a None value.
if value is None:
return
# Lists and other iterables as values are the equivalent of having multiple properties.
# NOTE: Lists obviously lose their order.
# TODO: Add support for ordered lists.
if isinstance(value, (list, tuple, set)):
for item in value:
self.add(property, item)
return
if isinstance(value, Node):
value = value._node
# Convert basic python datatypes to literals.
elif not isinstance(value, rdflib.term.Node):
value = rdflib.Literal(value)
# Automatically convert non-node properties to URIRef using default prefix.
if not isinstance(property, rdflib.term.Node):
property = self.NAMESPACE[property]
self._graph.add((self._node, property, value))
class CoreObject(Node):
RDF_TYPE = CASE.CoreObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(CoreObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('CoreObjectCreationTime', datetime.datetime.utcnow())
self.pb = ""
def create_PropertyBundle(self, prop_type=None, **kwargs):
"""Convenience function for adding property bundles to this Trace.
Args:
type: The @type of property bundle (can be of type rdflib.URIRef or string).
properties: Properties to add to the created property bundle.
Returns:
The property bundle created (instance of PropertyBundle).
"""
self.pb = PropertyBundle(self._graph, rdf_type=prop_type, **kwargs)
self.add(CASE.propertyBundle, self.pb)
return self.pb
class PropertyBundle(Node):
RDF_TYPE = CASE.PropertyBundle
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
# Property bundles should be blank nodes because we should be referencing them
# through CoreObjects.
self.propObj = kwargs
super(PropertyBundle, self).__init__(
graph, bnode=True, rdf_type=rdf_type, **kwargs)
class ContextObject(Node):
RDF_TYPE = CASE.ContextObject
# MASKED: __init__ function (lines 296-311)
class SubObject(Node):
RDF_TYPE = CASE.SubObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(SubObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('SubObjectCreationTime', datetime.datetime.utcnow())
class DuckObject(Node):
RDF_TYPE = CASE.DuckObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(DuckObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('DuckObjectCreationTime', datetime.datetime.utcnow())
|
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(ContextObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('ContextObjectCreationTime', datetime.datetime.utcnow())
| 296 | 311 |
# NOTICE
#
# This software was produced for the U.S. Government under
# contract SB-1341-14-CQ-0010, and is subject to the Rights
# in Data-General Clause 52.227-14, Alt. IV (DEC 2007)
#
# (c) 2018 The MITRE Corporation. All Rights Reserved.
#====================================================
# CASE API
#!/usr/bin/env python
import datetime
import uuid
import rdflib
from rdflib import RDF
CASE = rdflib.Namespace('http://case.example.org/core#')
#====================================================
#-- CREATE A CASE DOCUMENT FOR A SINGLE REPORT
class Document(object):
def __init__(self, graph=None):
"""
Initializes the CASE document.
Args:
graph: The graph to populate (instance of rdflib.Graph)
If not provided, a graph in memory will be used.
"""
if not graph:
graph = rdflib.Graph()
graph.namespace_manager.bind('case', CASE)
self.graph = graph
def _sanitize_triple(self, triple):
"""Santizes the triple to contains pure rdflib terms."""
s, p, o = triple
if isinstance(s, Node):
s = s._node
if isinstance(o, Node):
o = o._node
elif o is not None and not isinstance(o, rdflib.term.Node):
o = rdflib.Literal(o)
if p is not None and not isinstance(p, rdflib.term.Node):
p = CASE[p]
return s, p, o
def __iter__(self):
"""Wrapper for iterating over all triples in the graph"""
return iter(self.graph)
def __contains__(self, triple):
"""Wrapper for checking if triple is contained in the graph."""
return self._sanitize_triple(triple) in self.graph
def triples(self, triple):
"""Generator over the triple store in graph."""
return self.graph.triples(self._sanitize_triple(triple))
def _json_ld_context(self):
context = dict(
(pfx, str(ns))
for (pfx, ns) in self.graph.namespaces() if pfx and
str(ns) != u"http://www.w3.org/XML/1998/namespace")
context['@vocab'] = str(CASE)
return context
# Manually specify properties to help enforce that both properties are supplied.
def create_hash(self, hashMethod, hashValue):
return self.create_Node(
CASE.Hash, bnode=True, hashMethod=hashMethod, hashValue=hashValue)
# We are going to default to json-ld instead of rdflib's default of xml.
def serialize(self, format='json-ld', **kwargs):
"""Serializes the document's graph to a destination.
(Follows same arguments as rdflib.Graph().serialize())"""
if format == 'json-ld':
if 'context' not in kwargs:
kwargs['context'] = self._json_ld_context()
if 'auto_compact' not in kwargs:
kwargs['auto_compact'] = True
return self.graph.serialize(format=format, **kwargs)
# def serialize_append(self, format='json-ld', destination="new-api_output.json", **kwargs):
# """
# Serializes the document's graph to append to a destination file.
# """
# if format == 'json-ld':
# if 'context' not in kwargs:
# kwargs['context'] = self._json_ld_context()
# if 'auto_compact' not in kwargs:
# kwargs['auto_compact'] = True
# graph = self.graph.serialize(format=format, **kwargs)
# with open(destination, "a") as fin:
# fin.write(graph)
# fin.close()
#====================================================
#-- CREATE A CASE OBJECT
def create_Node(self, rdf_type=None, uri=None, bnode=False, **kwargs):
return Node(self.graph, rdf_type=rdf_type, uri=uri, bnode=bnode, **kwargs)
def create_CoreObject(self, _type=None, **kwargs):
"""
Creates and returns a CoreObject.
"""
return CoreObject(self.graph, rdf_type=_type, **kwargs)
def create_ContextObject(self, _type=None, **kwargs):
"""
Creates and returns a Context.
This class may not have PropertyBundles.
"""
return ContextObject(self.graph, rdf_type=_type, **kwargs)
def create_SubObject(self, _type=None, **kwargs):
"""
Creates and returns a Sub.
This class is for children of one of the above CASE classes.
This class may not have PropertyBundles.
"""
return SubObject(self.graph, rdf_type=_type, **kwargs)
def create_DuckObject(self, _type=None, **kwargs):
"""
Creates and returns a Duck.
These lonely Ducks have no parents and are fully duck-typed.
This class may not have PropertyBundles.
"""
return DuckObject(self.graph, rdf_type=_type, **kwargs)
#====================================================
#-- CASE OBJECT CLASSES
class Node(object):
"""Implements a generic node in the graph."""
RDF_TYPE = None
# Namespace to use when adding properties that are not of type rdflib.URIRef.
NAMESPACE = CASE
def __init__(self, graph, uri=None, bnode=False, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
uri: Optional string to set the URI to. (If not provided, a UUID will be generated.)
bnode: Whether to create a blank node or a URI reference.
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
super(Node, self).__init__()
if uri:
self.uri = uri
else:
self.uri = str(uuid.uuid4())
if bnode:
self._node = rdflib.BNode(self.uri)
else:
self._node = rdflib.URIRef(self.uri)
self._graph = graph
if not rdf_type:
rdf_type = self.RDF_TYPE
# Add namespace prefix to non URIRef to allow abstraction from rdflib.
if not isinstance(rdf_type, rdflib.term.Node):
rdf_type = self.NAMESPACE[rdf_type]
self.add(RDF.type, rdf_type)
for key, value in iter(kwargs.items()):
self.add(key, value)
def add(self, property, value):
"""Adds a property and its value to the node."""
# type: (object, object) -> object
# Ignore setting properties with a None value.
if value is None:
return
# Lists and other iterables as values are the equivalent of having multiple properties.
# NOTE: Lists obviously lose their order.
# TODO: Add support for ordered lists.
if isinstance(value, (list, tuple, set)):
for item in value:
self.add(property, item)
return
if isinstance(value, Node):
value = value._node
# Convert basic python datatypes to literals.
elif not isinstance(value, rdflib.term.Node):
value = rdflib.Literal(value)
# Automatically convert non-node properties to URIRef using default prefix.
if not isinstance(property, rdflib.term.Node):
property = self.NAMESPACE[property]
self._graph.add((self._node, property, value))
class CoreObject(Node):
RDF_TYPE = CASE.CoreObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(CoreObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('CoreObjectCreationTime', datetime.datetime.utcnow())
self.pb = ""
def create_PropertyBundle(self, prop_type=None, **kwargs):
"""Convenience function for adding property bundles to this Trace.
Args:
type: The @type of property bundle (can be of type rdflib.URIRef or string).
properties: Properties to add to the created property bundle.
Returns:
The property bundle created (instance of PropertyBundle).
"""
self.pb = PropertyBundle(self._graph, rdf_type=prop_type, **kwargs)
self.add(CASE.propertyBundle, self.pb)
return self.pb
class PropertyBundle(Node):
RDF_TYPE = CASE.PropertyBundle
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
# Property bundles should be blank nodes because we should be referencing them
# through CoreObjects.
self.propObj = kwargs
super(PropertyBundle, self).__init__(
graph, bnode=True, rdf_type=rdf_type, **kwargs)
class ContextObject(Node):
RDF_TYPE = CASE.ContextObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(ContextObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('ContextObjectCreationTime', datetime.datetime.utcnow())
class SubObject(Node):
RDF_TYPE = CASE.SubObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(SubObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('SubObjectCreationTime', datetime.datetime.utcnow())
class DuckObject(Node):
RDF_TYPE = CASE.DuckObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(DuckObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('DuckObjectCreationTime', datetime.datetime.utcnow())
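To tie the pieces of the file above together, here is a hedged end-to-end sketch (assumed, not taken from the dataset; the type, property, and hash values are placeholders):
# Hypothetical end-to-end usage of the CASE API defined above.
doc = Document()
trace = doc.create_CoreObject('Trace')
pb = trace.create_PropertyBundle('File', fileName='report.pdf', sizeInBytes=14231)
pb.add('hash', doc.create_hash('SHA256', 'placeholder-digest'))   # digest value is illustrative
output = doc.serialize(format='json-ld')   # defaults to JSON-LD with the CASE @vocab in the context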
|
__init__
|
Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
|
# NOTICE
#
# This software was produced for the U.S. Government under
# contract SB-1341-14-CQ-0010, and is subject to the Rights
# in Data-General Clause 52.227-14, Alt. IV (DEC 2007)
#
# (c) 2018 The MITRE Corporation. All Rights Reserved.
#====================================================
# CASE API
#!/usr/bin/env python
import datetime
import uuid
import rdflib
from rdflib import RDF
CASE = rdflib.Namespace('http://case.example.org/core#')
#====================================================
#-- CREATE A CASE DOCUMENT FOR A SINGLE REPORT
class Document(object):
def __init__(self, graph=None):
"""
Initializes the CASE document.
Args:
graph: The graph to populate (instance of rdflib.Graph)
If not provided, a graph in memory will be used.
"""
if not graph:
graph = rdflib.Graph()
graph.namespace_manager.bind('case', CASE)
self.graph = graph
def _sanitize_triple(self, triple):
"""Santizes the triple to contains pure rdflib terms."""
s, p, o = triple
if isinstance(s, Node):
s = s._node
if isinstance(o, Node):
o = o._node
elif o is not None and not isinstance(o, rdflib.term.Node):
o = rdflib.Literal(o)
if p is not None and not isinstance(p, rdflib.term.Node):
p = CASE[p]
return s, p, o
def __iter__(self):
"""Wrapper for iterating over all triples in the graph"""
return iter(self.graph)
def __contains__(self, triple):
"""Wrapper for checking if triple is contained in the graph."""
return self._sanitize_triple(triple) in self.graph
def triples(self, triple):
"""Generator over the triple store in graph."""
return self.graph.triples(self._sanitize_triple(triple))
def _json_ld_context(self):
context = dict(
(pfx, str(ns))
for (pfx, ns) in self.graph.namespaces() if pfx and
str(ns) != u"http://www.w3.org/XML/1998/namespace")
context['@vocab'] = str(CASE)
return context
# Manually specify properties to help enforce that both properties are supplied.
def create_hash(self, hashMethod, hashValue):
return self.create_Node(
CASE.Hash, bnode=True, hashMethod=hashMethod, hashValue=hashValue)
# We are going to default to json-ld instead of rdflib's default of xml.
def serialize(self, format='json-ld', **kwargs):
"""Serializes the document's graph to a destination.
(Follows same arguments as rdflib.Graph().serialize())"""
if format == 'json-ld':
if 'context' not in kwargs:
kwargs['context'] = self._json_ld_context()
if 'auto_compact' not in kwargs:
kwargs['auto_compact'] = True
return self.graph.serialize(format=format, **kwargs)
# def serialize_append(self, format='json-ld', destination="new-api_output.json", **kwargs):
# """
# Serializes the document's graph to append to a destination file.
# """
# if format == 'json-ld':
# if 'context' not in kwargs:
# kwargs['context'] = self._json_ld_context()
# if 'auto_compact' not in kwargs:
# kwargs['auto_compact'] = True
# graph = self.graph.serialize(format=format, **kwargs)
# with open(destination, "a") as fin:
# fin.write(graph)
# fin.close()
#====================================================
#-- CREATE A CASE OBJECT
def create_Node(self, rdf_type=None, uri=None, bnode=False, **kwargs):
return Node(self.graph, rdf_type=rdf_type, uri=uri, bnode=bnode, **kwargs)
def create_CoreObject(self, _type=None, **kwargs):
"""
Creates and returns a CoreObject.
"""
return CoreObject(self.graph, rdf_type=_type, **kwargs)
def create_ContextObject(self, _type=None, **kwargs):
"""
Creates and returns a Context.
This class may not have PropertyBundles.
"""
return ContextObject(self.graph, rdf_type=_type, **kwargs)
def create_SubObject(self, _type=None, **kwargs):
"""
Creates and returns a Sub.
This class is for children of one of the above CASE classes.
This class may not have PropertyBundles.
"""
return SubObject(self.graph, rdf_type=_type, **kwargs)
def create_DuckObject(self, _type=None, **kwargs):
"""
Creates and returns a Duck.
These lonely Ducks have no parents and are fully duck-typed.
This class may not have PropertyBundles.
"""
return DuckObject(self.graph, rdf_type=_type, **kwargs)
#====================================================
#-- CASE OBJECT CLASSES
class Node(object):
"""Implements a generic node in the graph."""
RDF_TYPE = None
# Namespace to use when adding properties that are not of type rdflib.URIRef.
NAMESPACE = CASE
def __init__(self, graph, uri=None, bnode=False, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
uri: Optional string to set the URI to. (If not provided, a UUID will be generated.)
bnode: Whether to create a blank node or a URI reference.
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
super(Node, self).__init__()
if uri:
self.uri = uri
else:
self.uri = str(uuid.uuid4())
if bnode:
self._node = rdflib.BNode(self.uri)
else:
self._node = rdflib.URIRef(self.uri)
self._graph = graph
if not rdf_type:
rdf_type = self.RDF_TYPE
# Add namespace prefix to non URIRef to allow abstraction from rdflib.
if not isinstance(rdf_type, rdflib.term.Node):
rdf_type = self.NAMESPACE[rdf_type]
self.add(RDF.type, rdf_type)
for key, value in iter(kwargs.items()):
self.add(key, value)
def add(self, property, value):
"""Adds a property and its value to the node."""
# type: (object, object) -> object
# Ignore setting properties with a None value.
if value is None:
return
# Lists and other iterables as values are the equivalent of having multiple properties.
# NOTE: Lists obviously lose their order.
# TODO: Add support for ordered lists.
if isinstance(value, (list, tuple, set)):
for item in value:
self.add(property, item)
return
if isinstance(value, Node):
value = value._node
# Convert basic python datatypes to literals.
elif not isinstance(value, rdflib.term.Node):
value = rdflib.Literal(value)
# Automatically convert non-node properties to URIRef using default prefix.
if not isinstance(property, rdflib.term.Node):
property = self.NAMESPACE[property]
self._graph.add((self._node, property, value))
class CoreObject(Node):
RDF_TYPE = CASE.CoreObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(CoreObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('CoreObjectCreationTime', datetime.datetime.utcnow())
self.pb = ""
def create_PropertyBundle(self, prop_type=None, **kwargs):
"""Convenience function for adding property bundles to this Trace.
Args:
type: The @type of property bundle (can be of type rdflib.URIRef or string).
properties: Properties to add to the created property bundle.
Returns:
The property bundle created (instance of PropertyBundle).
"""
self.pb = PropertyBundle(self._graph, rdf_type=prop_type, **kwargs)
self.add(CASE.propertyBundle, self.pb)
return self.pb
class PropertyBundle(Node):
RDF_TYPE = CASE.PropertyBundle
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
# Property bundles should be blank nodes because we should be referencing them
# through CoreObjects.
self.propObj = kwargs
super(PropertyBundle, self).__init__(
graph, bnode=True, rdf_type=rdf_type, **kwargs)
class ContextObject(Node):
RDF_TYPE = CASE.ContextObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(ContextObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('ContextObjectCreationTime', datetime.datetime.utcnow())
class SubObject(Node):
RDF_TYPE = CASE.SubObject
# MASKED: __init__ function (lines 318-333)
class DuckObject(Node):
RDF_TYPE = CASE.DuckObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(DuckObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('DuckObjectCreationTime', datetime.datetime.utcnow())
|
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(SubObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('SubObjectCreationTime', datetime.datetime.utcnow())
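The add() helper shown above accepts plain Python values, other Node instances, and iterables; a short hypothetical sketch of those three branches (property names are illustrative):
# Hypothetical sketch of Node.add() behaviour.
doc = Document()
parent = doc.create_CoreObject('Trace')
child = doc.create_SubObject('Account')
child.add('accountIssuer', 'Example Bank')   # plain value -> rdflib.Literal
parent.add('subObject', child)               # Node value -> linked through its underlying rdflib term
parent.add('tag', ['alpha', 'beta'])         # iterable -> one triple per item (order is lost)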
| 318 | 333 |
# NOTICE
#
# This software was produced for the U.S. Government under
# contract SB-1341-14-CQ-0010, and is subject to the Rights
# in Data-General Clause 52.227-14, Alt. IV (DEC 2007)
#
# (c) 2018 The MITRE Corporation. All Rights Reserved.
#====================================================
# CASE API
#!/usr/bin/env python
import datetime
import uuid
import rdflib
from rdflib import RDF
CASE = rdflib.Namespace('http://case.example.org/core#')
#====================================================
#-- CREATE A CASE DOCUMENT FOR A SINGLE REPORT
class Document(object):
def __init__(self, graph=None):
"""
Initializes the CASE document.
Args:
graph: The graph to populate (instance of rdflib.Graph)
If not provided, a graph in memory will be used.
"""
if not graph:
graph = rdflib.Graph()
graph.namespace_manager.bind('case', CASE)
self.graph = graph
def _sanitize_triple(self, triple):
"""Santizes the triple to contains pure rdflib terms."""
s, p, o = triple
if isinstance(s, Node):
s = s._node
if isinstance(o, Node):
o = o._node
elif o is not None and not isinstance(o, rdflib.term.Node):
o = rdflib.Literal(o)
if p is not None and not isinstance(p, rdflib.term.Node):
p = CASE[p]
return s, p, o
def __iter__(self):
"""Wrapper for iterating over all triples in the graph"""
return iter(self.graph)
def __contains__(self, triple):
"""Wrapper for checking if triple is contained in the graph."""
return self._sanitize_triple(triple) in self.graph
def triples(self, triple):
"""Generator over the triple store in graph."""
return self.graph.triples(self._sanitize_triple(triple))
def _json_ld_context(self):
context = dict(
(pfx, str(ns))
for (pfx, ns) in self.graph.namespaces() if pfx and
str(ns) != u"http://www.w3.org/XML/1998/namespace")
context['@vocab'] = str(CASE)
return context
# Manually specify properties to help enforce that both properties are supplied.
def create_hash(self, hashMethod, hashValue):
return self.create_Node(
CASE.Hash, bnode=True, hashMethod=hashMethod, hashValue=hashValue)
# We are going to default to json-ld instead of rdflib's default of xml.
def serialize(self, format='json-ld', **kwargs):
"""Serializes the document's graph to a destination.
(Follows same arguments as rdflib.Graph().serialize())"""
if format == 'json-ld':
if 'context' not in kwargs:
kwargs['context'] = self._json_ld_context()
if 'auto_compact' not in kwargs:
kwargs['auto_compact'] = True
return self.graph.serialize(format=format, **kwargs)
# def serialize_append(self, format='json-ld', destination="new-api_output.json", **kwargs):
# """
# Serializes the document's graph to append to a destination file.
# """
# if format == 'json-ld':
# if 'context' not in kwargs:
# kwargs['context'] = self._json_ld_context()
# if 'auto_compact' not in kwargs:
# kwargs['auto_compact'] = True
# graph = self.graph.serialize(format=format, **kwargs)
# with open(destination, "a") as fin:
# fin.write(graph)
# fin.close()
#====================================================
#-- CREATE A CASE OBJECT
def create_Node(self, rdf_type=None, uri=None, bnode=False, **kwargs):
return Node(self.graph, rdf_type=rdf_type, uri=uri, bnode=bnode, **kwargs)
def create_CoreObject(self, _type=None, **kwargs):
"""
Creates and returns a CoreObject.
"""
return CoreObject(self.graph, rdf_type=_type, **kwargs)
def create_ContextObject(self, _type=None, **kwargs):
"""
Creates and returns a Context.
This class may not have PropertyBundles.
"""
return ContextObject(self.graph, rdf_type=_type, **kwargs)
def create_SubObject(self, _type=None, **kwargs):
"""
Creates and returns a Sub.
This class is for children of one of the above CASE classes.
This class may not have PropertyBundles.
"""
return SubObject(self.graph, rdf_type=_type, **kwargs)
def create_DuckObject(self, _type=None, **kwargs):
"""
Creates and returns a Duck.
These lonely Ducks have no parents and are fully duck-typed.
This class may not have PropertyBundles.
"""
return DuckObject(self.graph, rdf_type=_type, **kwargs)
#====================================================
#-- CASE OBJECT CLASSES
class Node(object):
"""Implements a generic node in the graph."""
RDF_TYPE = None
# Namespace to use when adding properties that are not of type rdflib.URIRef.
NAMESPACE = CASE
def __init__(self, graph, uri=None, bnode=False, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
uri: Optional string to set the URI to. (If not provided, a UUID will be generated.)
bnode: Whether to create a blank node or a URI reference.
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
super(Node, self).__init__()
if uri:
self.uri = uri
else:
self.uri = str(uuid.uuid4())
if bnode:
self._node = rdflib.BNode(self.uri)
else:
self._node = rdflib.URIRef(self.uri)
self._graph = graph
if not rdf_type:
rdf_type = self.RDF_TYPE
# Add namespace prefix to non URIRef to allow abstraction from rdflib.
if not isinstance(rdf_type, rdflib.term.Node):
rdf_type = self.NAMESPACE[rdf_type]
self.add(RDF.type, rdf_type)
for key, value in iter(kwargs.items()):
self.add(key, value)
def add(self, property, value):
"""Adds a property and its value to the node."""
# type: (object, object) -> object
# Ignore setting properties with a None value.
if value is None:
return
# Lists and other iterables as values are the equivalent of having multiple properties.
# NOTE: Lists obviously lose their order.
# TODO: Add support for ordered lists.
if isinstance(value, (list, tuple, set)):
for item in value:
self.add(property, item)
return
if isinstance(value, Node):
value = value._node
# Convert basic python datatypes to literals.
elif not isinstance(value, rdflib.term.Node):
value = rdflib.Literal(value)
# Automatically convert non-node properties to URIRef using default prefix.
if not isinstance(property, rdflib.term.Node):
property = self.NAMESPACE[property]
self._graph.add((self._node, property, value))
class CoreObject(Node):
RDF_TYPE = CASE.CoreObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(CoreObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('CoreObjectCreationTime', datetime.datetime.utcnow())
self.pb = ""
def create_PropertyBundle(self, prop_type=None, **kwargs):
"""Convenience function for adding property bundles to this Trace.
Args:
type: The @type of property bundle (can be of type rdflib.URIRef or string).
properties: Properties to add to the created property bundle.
Returns:
The property bundle created (instance of PropertyBundle).
"""
self.pb = PropertyBundle(self._graph, rdf_type=prop_type, **kwargs)
self.add(CASE.propertyBundle, self.pb)
return self.pb
class PropertyBundle(Node):
RDF_TYPE = CASE.PropertyBundle
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
# Property bundles should be blank nodes because we should be referencing them
# through CoreObjects.
self.propObj = kwargs
super(PropertyBundle, self).__init__(
graph, bnode=True, rdf_type=rdf_type, **kwargs)
class ContextObject(Node):
RDF_TYPE = CASE.ContextObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(ContextObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('ContextObjectCreationTime', datetime.datetime.utcnow())
class SubObject(Node):
RDF_TYPE = CASE.SubObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(SubObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('SubObjectCreationTime', datetime.datetime.utcnow())
class DuckObject(Node):
RDF_TYPE = CASE.DuckObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(DuckObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('DuckObjectCreationTime', datetime.datetime.utcnow())
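Because _sanitize_triple converts Node instances and plain strings on the fly, the Document wrappers can be queried directly; a hypothetical sketch (property names are illustrative):
# Hypothetical sketch of the Document query wrappers.
doc = Document()
obj = doc.create_CoreObject('Trace', description='seized laptop')
assert (obj, 'description', 'seized laptop') in doc   # goes through __contains__/_sanitize_triple
for s, p, o in doc.triples((obj, None, None)):        # None acts as a wildcard, as in rdflib
    print(s, p, o)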
|
__init__
|
Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
|
# NOTICE
#
# This software was produced for the U.S. Government under
# contract SB-1341-14-CQ-0010, and is subject to the Rights
# in Data-General Clause 52.227-14, Alt. IV (DEC 2007)
#
# (c) 2018 The MITRE Corporation. All Rights Reserved.
#====================================================
# CASE API
#!/usr/bin/env python
import datetime
import uuid
import rdflib
from rdflib import RDF
CASE = rdflib.Namespace('http://case.example.org/core#')
#====================================================
#-- CREATE A CASE DOCUMENT FOR A SINGLE REPORT
class Document(object):
def __init__(self, graph=None):
"""
Initializes the CASE document.
Args:
graph: The graph to populate (instance of rdflib.Graph)
If not provided, a graph in memory will be used.
"""
if not graph:
graph = rdflib.Graph()
graph.namespace_manager.bind('case', CASE)
self.graph = graph
def _sanitize_triple(self, triple):
"""Santizes the triple to contains pure rdflib terms."""
s, p, o = triple
if isinstance(s, Node):
s = s._node
if isinstance(o, Node):
o = o._node
elif o is not None and not isinstance(o, rdflib.term.Node):
o = rdflib.Literal(o)
if p is not None and not isinstance(p, rdflib.term.Node):
p = CASE[p]
return s, p, o
def __iter__(self):
"""Wrapper for iterating over all triples in the graph"""
return iter(self.graph)
def __contains__(self, triple):
"""Wrapper for checking if triple is contained in the graph."""
return self._sanitize_triple(triple) in self.graph
def triples(self, triple):
"""Generator over the triple store in graph."""
return self.graph.triples(self._sanitize_triple(triple))
def _json_ld_context(self):
context = dict(
(pfx, str(ns))
for (pfx, ns) in self.graph.namespaces() if pfx and
str(ns) != u"http://www.w3.org/XML/1998/namespace")
context['@vocab'] = str(CASE)
return context
# Manually specify properties to help enforce that both properties are supplied.
def create_hash(self, hashMethod, hashValue):
return self.create_Node(
CASE.Hash, bnode=True, hashMethod=hashMethod, hashValue=hashValue)
# We are going to default to json-ld instead of rdflib's default of xml.
def serialize(self, format='json-ld', **kwargs):
"""Serializes the document's graph to a destination.
(Follows same arguments as rdflib.Graph().serialize())"""
if format == 'json-ld':
if 'context' not in kwargs:
kwargs['context'] = self._json_ld_context()
if 'auto_compact' not in kwargs:
kwargs['auto_compact'] = True
return self.graph.serialize(format=format, **kwargs)
# def serialize_append(self, format='json-ld', destination="new-api_output.json", **kwargs):
# """
# Serializes the document's graph to append to a destination file.
# """
# if format == 'json-ld':
# if 'context' not in kwargs:
# kwargs['context'] = self._json_ld_context()
# if 'auto_compact' not in kwargs:
# kwargs['auto_compact'] = True
# graph = self.graph.serialize(format=format, **kwargs)
# with open(destination, "a") as fin:
# fin.write(graph)
# fin.close()
#====================================================
#-- CREATE A CASE OBJECT
def create_Node(self, rdf_type=None, uri=None, bnode=False, **kwargs):
return Node(self.graph, rdf_type=rdf_type, uri=uri, bnode=bnode, **kwargs)
def create_CoreObject(self, _type=None, **kwargs):
"""
Creates and returns a CoreObject.
"""
return CoreObject(self.graph, rdf_type=_type, **kwargs)
def create_ContextObject(self, _type=None, **kwargs):
"""
Creates and returns a Context.
This class may not have PropertyBundles.
"""
return ContextObject(self.graph, rdf_type=_type, **kwargs)
def create_SubObject(self, _type=None, **kwargs):
"""
Creates and returns a Sub.
This class is for children of one of the above CASE classes.
This class may not have PropertyBundles.
"""
return SubObject(self.graph, rdf_type=_type, **kwargs)
def create_DuckObject(self, _type=None, **kwargs):
"""
Creates and returns a Duck.
These lonely Ducks have no parents and are fully duck-typed.
This class may not have PropertyBundles.
"""
return DuckObject(self.graph, rdf_type=_type, **kwargs)
#====================================================
#-- CASE OBJECT CLASSES
class Node(object):
"""Implements a generic node in the graph."""
RDF_TYPE = None
# Namespace to use when adding properties that are not of type rdflib.URIRef.
NAMESPACE = CASE
def __init__(self, graph, uri=None, bnode=False, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
uri: Optional string to set the URI to. (If not provided, a UUID will be generated.)
bnode: Whether to create a blank node or a URI reference.
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
super(Node, self).__init__()
if uri:
self.uri = uri
else:
self.uri = str(uuid.uuid4())
if bnode:
self._node = rdflib.BNode(self.uri)
else:
self._node = rdflib.URIRef(self.uri)
self._graph = graph
if not rdf_type:
rdf_type = self.RDF_TYPE
# Add namespace prefix to non URIRef to allow abstraction from rdflib.
if not isinstance(rdf_type, rdflib.term.Node):
rdf_type = self.NAMESPACE[rdf_type]
self.add(RDF.type, rdf_type)
for key, value in iter(kwargs.items()):
self.add(key, value)
def add(self, property, value):
"""Adds a property and its value to the node."""
# type: (object, object) -> object
# Ignore setting properties with a None value.
if value is None:
return
# Lists and other iterables as values are the equivalent of having multiple properties.
# NOTE: Lists obviously lose their order.
# TODO: Add support for ordered lists.
if isinstance(value, (list, tuple, set)):
for item in value:
self.add(property, item)
return
if isinstance(value, Node):
value = value._node
# Convert basic python datatypes to literals.
elif not isinstance(value, rdflib.term.Node):
value = rdflib.Literal(value)
# Automatically convert non-node properties to URIRef using default prefix.
if not isinstance(property, rdflib.term.Node):
property = self.NAMESPACE[property]
self._graph.add((self._node, property, value))
class CoreObject(Node):
RDF_TYPE = CASE.CoreObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(CoreObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('CoreObjectCreationTime', datetime.datetime.utcnow())
self.pb = ""
def create_PropertyBundle(self, prop_type=None, **kwargs):
"""Convenience function for adding property bundles to this Trace.
Args:
type: The @type of property bundle (can be of type rdflib.URIRef or string).
properties: Properties to add to the created property bundle.
Returns:
The property bundle created (instance of PropertyBundle).
"""
self.pb = PropertyBundle(self._graph, rdf_type=prop_type, **kwargs)
self.add(CASE.propertyBundle, self.pb)
return self.pb
class PropertyBundle(Node):
RDF_TYPE = CASE.PropertyBundle
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
# Property bundles should be blank nodes because we should be referencing them
# through CoreObjects.
self.propObj = kwargs
super(PropertyBundle, self).__init__(
graph, bnode=True, rdf_type=rdf_type, **kwargs)
class ContextObject(Node):
RDF_TYPE = CASE.ContextObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(ContextObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('ContextObjectCreationTime', datetime.datetime.utcnow())
class SubObject(Node):
RDF_TYPE = CASE.SubObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(SubObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('SubObjectCreationTime', datetime.datetime.utcnow())
class DuckObject(Node):
RDF_TYPE = CASE.DuckObject
# MASKED: __init__ function (lines 340-355)
|
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(DuckObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('DuckObjectCreationTime', datetime.datetime.utcnow())
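Types and properties that are already rdflib terms are used verbatim, so vocabularies outside the default CASE namespace can be mixed in; a hypothetical sketch (the example.org ontology is made up):
# Hypothetical sketch: mixing an external vocabulary with the default CASE namespace.
from rdflib import URIRef
doc = Document()
duck = doc.create_DuckObject(URIRef('http://example.org/ontology#EmailMessage'))
duck.add(URIRef('http://example.org/ontology#subject'), 'Quarterly report')
duck.add('observableCreatedBy', 'mail-parser')   # plain string still resolves to the CASE namespace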
| 340 | 355 |
# NOTICE
#
# This software was produced for the U.S. Government under
# contract SB-1341-14-CQ-0010, and is subject to the Rights
# in Data-General Clause 52.227-14, Alt. IV (DEC 2007)
#
# (c) 2018 The MITRE Corporation. All Rights Reserved.
#====================================================
# CASE API
#!/usr/bin/env python
import datetime
import uuid
import rdflib
from rdflib import RDF
CASE = rdflib.Namespace('http://case.example.org/core#')
#====================================================
#-- CREATE A CASE DOCUMENT FOR A SINGLE REPORT
class Document(object):
def __init__(self, graph=None):
"""
Initializes the CASE document.
Args:
graph: The graph to populate (instance of rdflib.Graph)
If not provided, a graph in memory will be used.
"""
if not graph:
graph = rdflib.Graph()
graph.namespace_manager.bind('case', CASE)
self.graph = graph
def _sanitize_triple(self, triple):
"""Santizes the triple to contains pure rdflib terms."""
s, p, o = triple
if isinstance(s, Node):
s = s._node
if isinstance(o, Node):
o = o._node
elif o is not None and not isinstance(o, rdflib.term.Node):
o = rdflib.Literal(o)
if p is not None and not isinstance(p, rdflib.term.Node):
p = CASE[p]
return s, p, o
def __iter__(self):
"""Wrapper for iterating over all triples in the graph"""
return iter(self.graph)
def __contains__(self, triple):
"""Wrapper for checking if triple is contained in the graph."""
return self._sanitize_triple(triple) in self.graph
def triples(self, triple):
"""Generator over the triple store in graph."""
return self.graph.triples(self._sanitize_triple(triple))
def _json_ld_context(self):
context = dict(
(pfx, str(ns))
for (pfx, ns) in self.graph.namespaces() if pfx and
str(ns) != u"http://www.w3.org/XML/1998/namespace")
context['@vocab'] = str(CASE)
return context
# Manually specify properties to help enforce that both properties are supplied.
def create_hash(self, hashMethod, hashValue):
return self.create_Node(
CASE.Hash, bnode=True, hashMethod=hashMethod, hashValue=hashValue)
# We are going to default to json-ld instead of rdflib's default of xml.
def serialize(self, format='json-ld', **kwargs):
"""Serializes the document's graph to a destination.
(Follows same arguments as rdflib.Graph().serialize())"""
if format == 'json-ld':
if 'context' not in kwargs:
kwargs['context'] = self._json_ld_context()
if 'auto_compact' not in kwargs:
kwargs['auto_compact'] = True
return self.graph.serialize(format=format, **kwargs)
# def serialize_append(self, format='json-ld', destination="new-api_output.json", **kwargs):
# """
# Serializes the document's graph to append to a destination file.
# """
# if format == 'json-ld':
# if 'context' not in kwargs:
# kwargs['context'] = self._json_ld_context()
# if 'auto_compact' not in kwargs:
# kwargs['auto_compact'] = True
# graph = self.graph.serialize(format=format, **kwargs)
# with open(destination, "a") as fin:
# fin.write(graph)
# fin.close()
#====================================================
#-- CREATE A CASE OBJECT
def create_Node(self, rdf_type=None, uri=None, bnode=False, **kwargs):
return Node(self.graph, rdf_type=rdf_type, uri=uri, bnode=bnode, **kwargs)
def create_CoreObject(self, _type=None, **kwargs):
"""
Creates and returns a CoreObject.
"""
return CoreObject(self.graph, rdf_type=_type, **kwargs)
def create_ContextObject(self, _type=None, **kwargs):
"""
Creates and returns a Context.
This class may not have PropertyBundles.
"""
return ContextObject(self.graph, rdf_type=_type, **kwargs)
def create_SubObject(self, _type=None, **kwargs):
"""
Creates and returns a Sub.
This class is for children of one of the above CASE classes.
This class may not have PropertyBundles.
"""
return SubObject(self.graph, rdf_type=_type, **kwargs)
def create_DuckObject(self, _type=None, **kwargs):
"""
Creates and returns a Duck.
These lonely Ducks have no parents and are fully duck-typed.
This class may not have PropertyBundles.
"""
return DuckObject(self.graph, rdf_type=_type, **kwargs)
#====================================================
#-- CASE OBJECT CLASSES
class Node(object):
"""Implements a generic node in the graph."""
RDF_TYPE = None
# Namespace to use when adding properties that are not of type rdflib.URIRef.
NAMESPACE = CASE
def __init__(self, graph, uri=None, bnode=False, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
uri: Optional string to set the URI to. (If not provided, a UUID will be generated.)
bnode: Whether to create a blank node or a URI reference.
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
super(Node, self).__init__()
if uri:
self.uri = uri
else:
self.uri = str(uuid.uuid4())
if bnode:
self._node = rdflib.BNode(self.uri)
else:
self._node = rdflib.URIRef(self.uri)
self._graph = graph
if not rdf_type:
rdf_type = self.RDF_TYPE
# Add namespace prefix to non URIRef to allow abstraction from rdflib.
if not isinstance(rdf_type, rdflib.term.Node):
rdf_type = self.NAMESPACE[rdf_type]
self.add(RDF.type, rdf_type)
for key, value in iter(kwargs.items()):
self.add(key, value)
def add(self, property, value):
"""Adds a property and its value to the node."""
# type: (object, object) -> object
# Ignore setting properties with a None value.
if value is None:
return
# Lists and other iterables as values are the equivalent of having multiple properties.
# NOTE: Lists obviously lose their order.
# TODO: Add support for ordered lists.
if isinstance(value, (list, tuple, set)):
for item in value:
self.add(property, item)
return
if isinstance(value, Node):
value = value._node
# Convert basic python datatypes to literals.
elif not isinstance(value, rdflib.term.Node):
value = rdflib.Literal(value)
# Automatically convert non-node properties to URIRef using default prefix.
if not isinstance(property, rdflib.term.Node):
property = self.NAMESPACE[property]
self._graph.add((self._node, property, value))
class CoreObject(Node):
RDF_TYPE = CASE.CoreObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(CoreObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('CoreObjectCreationTime', datetime.datetime.utcnow())
self.pb = ""
def create_PropertyBundle(self, prop_type=None, **kwargs):
"""Convenience function for adding property bundles to this Trace.
Args:
type: The @type of property bundle (can be of type rdflib.URIRef or string).
properties: Properties to add to the created property bundle.
Returns:
The property bundle created (instance of PropertyBundle).
"""
self.pb = PropertyBundle(self._graph, rdf_type=prop_type, **kwargs)
self.add(CASE.propertyBundle, self.pb)
return self.pb
class PropertyBundle(Node):
RDF_TYPE = CASE.PropertyBundle
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
# Property bundles should be blank nodes because we should be referencing them
# through CoreObjects.
self.propObj = kwargs
super(PropertyBundle, self).__init__(
graph, bnode=True, rdf_type=rdf_type, **kwargs)
class ContextObject(Node):
RDF_TYPE = CASE.ContextObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(ContextObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('ContextObjectCreationTime', datetime.datetime.utcnow())
class SubObject(Node):
RDF_TYPE = CASE.SubObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(SubObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('SubObjectCreationTime', datetime.datetime.utcnow())
class DuckObject(Node):
RDF_TYPE = CASE.DuckObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(DuckObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('DuckObjectCreationTime', datetime.datetime.utcnow())
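A Document can also wrap a caller-supplied rdflib.Graph, and create_Node accepts an explicit URI instead of the generated UUID; a hypothetical sketch (the tool URI and name are illustrative):
# Hypothetical sketch: sharing an existing graph and pinning a node URI.
import rdflib
shared_graph = rdflib.Graph()
doc = Document(graph=shared_graph)
tool = doc.create_Node(rdf_type='Tool', uri='http://example.org/tools/imager-01', toolName='Imager')
assert len(shared_graph) > 0   # triples land directly in the caller-supplied graph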
|
put_object_from_filelike
|
Store the byte contents of a file in the repository.
:param handle: filelike object with the byte content to be stored.
:return: the generated fully qualified identifier for the object within the repository.
:raises TypeError: if the handle is not a byte stream.
|
# -*- coding: utf-8 -*-
"""Class that defines the abstract interface for an object repository.
The scope of this class is intentionally very narrow. Any backend implementation should merely provide the methods to
store binary blobs, or "objects", and return a string-based key that uniquely identifies the object that was just created.
This key should then be able to be used to retrieve the bytes of the corresponding object or to delete it.
"""
import abc
import contextlib
import hashlib
import io
import pathlib
from typing import BinaryIO, Iterable, Iterator, List, Optional, Tuple, Union
from aiida.common.hashing import chunked_file_hash
__all__ = ('AbstractRepositoryBackend',)
class AbstractRepositoryBackend(metaclass=abc.ABCMeta):
"""Class that defines the abstract interface for an object repository.
The repository backend only deals with raw bytes, both when creating new objects as well as when returning a stream
or the content of an existing object. The encoding and decoding of the byte content should be done by the client
upstream. The file repository backend is also not expected to keep any kind of file hierarchy but must be assumed
to be a simple flat data store. When files are created in the file object repository, the implementation will return
a string-based key with which the content of the stored object can be addressed. This key is guaranteed to be unique
and persistent. Persisting the key or mapping it onto a virtual file hierarchy is again up to the client upstream.
"""
@property
@abc.abstractmethod
def uuid(self) -> Optional[str]:
"""Return the unique identifier of the repository."""
@property
@abc.abstractmethod
def key_format(self) -> Optional[str]:
"""Return the format for the keys of the repository.
Important for when migrating between backends (e.g. archive -> main), because if they are not equal then it is
necessary to re-compute all the `Node.repository_metadata` before importing (otherwise they will not match
with the repository).
"""
@abc.abstractmethod
def initialise(self, **kwargs) -> None:
"""Initialise the repository if it hasn't already been initialised.
:param kwargs: parameters for the initialisation.
"""
@property
@abc.abstractmethod
def is_initialised(self) -> bool:
"""Return whether the repository has been initialised."""
@abc.abstractmethod
def erase(self) -> None:
"""Delete the repository itself and all its contents.
.. note:: This should not merely delete the contents of the repository but any resources it created. For
example, if the repository is essentially a folder on disk, the folder itself should also be deleted, not
just its contents.
"""
@staticmethod
def is_readable_byte_stream(handle) -> bool:
return hasattr(handle, 'read') and hasattr(handle, 'mode') and 'b' in handle.mode
# MASKED: put_object_from_filelike function (lines 71-80)
@abc.abstractmethod
def _put_object_from_filelike(self, handle: BinaryIO) -> str:
pass
def put_object_from_file(self, filepath: Union[str, pathlib.Path]) -> str:
"""Store a new object with contents of the file located at `filepath` on this file system.
:param filepath: absolute path of file whose contents to copy to the repository.
:return: the generated fully qualified identifier for the object within the repository.
:raises TypeError: if the handle is not a byte stream.
"""
with open(filepath, mode='rb') as handle:
return self.put_object_from_filelike(handle)
@abc.abstractmethod
def has_objects(self, keys: List[str]) -> List[bool]:
"""Return whether the repository has an object with the given key.
:param keys:
list of fully qualified identifiers for objects within the repository.
:return:
list of logicals, in the same order as the keys provided, with value True if the respective
object exists and False otherwise.
"""
def has_object(self, key: str) -> bool:
"""Return whether the repository has an object with the given key.
:param key: fully qualified identifier for the object within the repository.
:return: True if the object exists, False otherwise.
"""
return self.has_objects([key])[0]
@abc.abstractmethod
def list_objects(self) -> Iterable[str]:
"""Return iterable that yields all available objects by key.
:return: An iterable for all the available object keys.
"""
@contextlib.contextmanager
def open(self, key: str) -> Iterator[BinaryIO]:
"""Open a file handle to an object stored under the given key.
.. note:: this should only be used to open a handle to read an existing file. To write a new file use the method
``put_object_from_filelike`` instead.
:param key: fully qualified identifier for the object within the repository.
:return: yield a byte stream object.
:raise FileNotFoundError: if the file does not exist.
:raise OSError: if the file could not be opened.
"""
if not self.has_object(key):
raise FileNotFoundError(f'object with key `{key}` does not exist.')
def get_object_content(self, key: str) -> bytes:
"""Return the content of a object identified by key.
:param key: fully qualified identifier for the object within the repository.
:raise FileNotFoundError: if the file does not exist.
:raise OSError: if the file could not be opened.
"""
with self.open(key) as handle: # pylint: disable=not-context-manager
return handle.read()
@abc.abstractmethod
def iter_object_streams(self, keys: List[str]) -> Iterator[Tuple[str, BinaryIO]]:
"""Return an iterator over the (read-only) byte streams of objects identified by key.
.. note:: handles should only be read within the context of this iterator.
:param keys: fully qualified identifiers for the objects within the repository.
:return: an iterator over the object byte streams.
:raise FileNotFoundError: if the file does not exist.
:raise OSError: if a file could not be opened.
"""
def get_object_hash(self, key: str) -> str:
"""Return the SHA-256 hash of an object stored under the given key.
.. important::
A SHA-256 hash should always be returned,
to ensure consistency across different repository implementations.
:param key: fully qualified identifier for the object within the repository.
:raise FileNotFoundError: if the file does not exist.
:raise OSError: if the file could not be opened.
"""
with self.open(key) as handle: # pylint: disable=not-context-manager
return chunked_file_hash(handle, hashlib.sha256)
@abc.abstractmethod
def delete_objects(self, keys: List[str]) -> None:
"""Delete the objects from the repository.
:param keys: list of fully qualified identifiers for the objects within the repository.
:raise FileNotFoundError: if any of the files does not exist.
:raise OSError: if any of the files could not be deleted.
"""
keys_exist = self.has_objects(keys)
if not all(keys_exist):
error_message = 'some of the keys provided do not correspond to any object in the repository:\n'
for indx, key_exists in enumerate(keys_exist):
if not key_exists:
error_message += f' > object with key `{keys[indx]}` does not exist.\n'
raise FileNotFoundError(error_message)
def delete_object(self, key: str) -> None:
"""Delete the object from the repository.
:param key: fully qualified identifier for the object within the repository.
:raise FileNotFoundError: if the file does not exist.
:raise OSError: if the file could not be deleted.
"""
return self.delete_objects([key])
|
def put_object_from_filelike(self, handle: BinaryIO) -> str:
"""Store the byte contents of a file in the repository.
:param handle: filelike object with the byte content to be stored.
:return: the generated fully qualified identifier for the object within the repository.
:raises TypeError: if the handle is not a byte stream.
"""
if not isinstance(handle, io.BufferedIOBase) and not self.is_readable_byte_stream(handle):
raise TypeError(f'handle does not seem to be a byte stream: {type(handle)}.')
return self._put_object_from_filelike(handle)
| 71 | 80 |
# -*- coding: utf-8 -*-
"""Class that defines the abstract interface for an object repository.
The scope of this class is intentionally very narrow. Any backend implementation should merely provide the methods to
store binary blobs, or "objects", and return a string-based key that uniquely identifies the object that was just created.
This key should then be able to be used to retrieve the bytes of the corresponding object or to delete it.
"""
import abc
import contextlib
import hashlib
import io
import pathlib
from typing import BinaryIO, Iterable, Iterator, List, Optional, Tuple, Union
from aiida.common.hashing import chunked_file_hash
__all__ = ('AbstractRepositoryBackend',)
class AbstractRepositoryBackend(metaclass=abc.ABCMeta):
"""Class that defines the abstract interface for an object repository.
The repository backend only deals with raw bytes, both when creating new objects as well as when returning a stream
or the content of an existing object. The encoding and decoding of the byte content should be done by the client
upstream. The file repository backend is also not expected to keep any kind of file hierarchy but must be assumed
to be a simple flat data store. When files are created in the file object repository, the implementation will return
a string-based key with which the content of the stored object can be addressed. This key is guaranteed to be unique
and persistent. Persisting the key or mapping it onto a virtual file hierarchy is again up to the client upstream.
"""
@property
@abc.abstractmethod
def uuid(self) -> Optional[str]:
"""Return the unique identifier of the repository."""
@property
@abc.abstractmethod
def key_format(self) -> Optional[str]:
"""Return the format for the keys of the repository.
Important for when migrating between backends (e.g. archive -> main), as if they are not equal then it is
necessary to re-compute all the `Node.repository_metadata` before importing (otherwise they will not match
with the repository).
"""
@abc.abstractmethod
def initialise(self, **kwargs) -> None:
"""Initialise the repository if it hasn't already been initialised.
:param kwargs: parameters for the initialisation.
"""
@property
@abc.abstractmethod
def is_initialised(self) -> bool:
"""Return whether the repository has been initialised."""
@abc.abstractmethod
def erase(self) -> None:
"""Delete the repository itself and all its contents.
.. note:: This should not merely delete the contents of the repository but any resources it created. For
example, if the repository is essentially a folder on disk, the folder itself should also be deleted, not
just its contents.
"""
@staticmethod
def is_readable_byte_stream(handle) -> bool:
return hasattr(handle, 'read') and hasattr(handle, 'mode') and 'b' in handle.mode
def put_object_from_filelike(self, handle: BinaryIO) -> str:
"""Store the byte contents of a file in the repository.
:param handle: filelike object with the byte content to be stored.
:return: the generated fully qualified identifier for the object within the repository.
:raises TypeError: if the handle is not a byte stream.
"""
if not isinstance(handle, io.BufferedIOBase) and not self.is_readable_byte_stream(handle):
raise TypeError(f'handle does not seem to be a byte stream: {type(handle)}.')
return self._put_object_from_filelike(handle)
@abc.abstractmethod
def _put_object_from_filelike(self, handle: BinaryIO) -> str:
pass
def put_object_from_file(self, filepath: Union[str, pathlib.Path]) -> str:
"""Store a new object with contents of the file located at `filepath` on this file system.
:param filepath: absolute path of file whose contents to copy to the repository.
:return: the generated fully qualified identifier for the object within the repository.
:raises TypeError: if the handle is not a byte stream.
"""
with open(filepath, mode='rb') as handle:
return self.put_object_from_filelike(handle)
@abc.abstractmethod
def has_objects(self, keys: List[str]) -> List[bool]:
"""Return whether the repository has an object with the given key.
:param keys:
list of fully qualified identifiers for objects within the repository.
:return:
list of logicals, in the same order as the keys provided, with value True if the respective
object exists and False otherwise.
"""
def has_object(self, key: str) -> bool:
"""Return whether the repository has an object with the given key.
:param key: fully qualified identifier for the object within the repository.
:return: True if the object exists, False otherwise.
"""
return self.has_objects([key])[0]
@abc.abstractmethod
def list_objects(self) -> Iterable[str]:
"""Return iterable that yields all available objects by key.
:return: An iterable for all the available object keys.
"""
@contextlib.contextmanager
def open(self, key: str) -> Iterator[BinaryIO]:
"""Open a file handle to an object stored under the given key.
.. note:: this should only be used to open a handle to read an existing file. To write a new file use the method
``put_object_from_filelike`` instead.
:param key: fully qualified identifier for the object within the repository.
:return: yield a byte stream object.
:raise FileNotFoundError: if the file does not exist.
:raise OSError: if the file could not be opened.
"""
if not self.has_object(key):
raise FileNotFoundError(f'object with key `{key}` does not exist.')
def get_object_content(self, key: str) -> bytes:
"""Return the content of a object identified by key.
:param key: fully qualified identifier for the object within the repository.
:raise FileNotFoundError: if the file does not exist.
:raise OSError: if the file could not be opened.
"""
with self.open(key) as handle: # pylint: disable=not-context-manager
return handle.read()
@abc.abstractmethod
def iter_object_streams(self, keys: List[str]) -> Iterator[Tuple[str, BinaryIO]]:
"""Return an iterator over the (read-only) byte streams of objects identified by key.
.. note:: handles should only be read within the context of this iterator.
:param keys: fully qualified identifiers for the objects within the repository.
:return: an iterator over the object byte streams.
:raise FileNotFoundError: if the file does not exist.
:raise OSError: if a file could not be opened.
"""
def get_object_hash(self, key: str) -> str:
"""Return the SHA-256 hash of an object stored under the given key.
.. important::
A SHA-256 hash should always be returned,
to ensure consistency across different repository implementations.
:param key: fully qualified identifier for the object within the repository.
:raise FileNotFoundError: if the file does not exist.
:raise OSError: if the file could not be opened.
"""
with self.open(key) as handle: # pylint: disable=not-context-manager
return chunked_file_hash(handle, hashlib.sha256)
@abc.abstractmethod
def delete_objects(self, keys: List[str]) -> None:
"""Delete the objects from the repository.
:param keys: list of fully qualified identifiers for the objects within the repository.
:raise FileNotFoundError: if any of the files does not exist.
:raise OSError: if any of the files could not be deleted.
"""
keys_exist = self.has_objects(keys)
if not all(keys_exist):
error_message = 'some of the keys provided do not correspond to any object in the repository:\n'
for indx, key_exists in enumerate(keys_exist):
if not key_exists:
error_message += f' > object with key `{keys[indx]}` does not exist.\n'
raise FileNotFoundError(error_message)
def delete_object(self, key: str) -> None:
"""Delete the object from the repository.
:param key: fully qualified identifier for the object within the repository.
:raise FileNotFoundError: if the file does not exist.
:raise OSError: if the file could not be deleted.
"""
return self.delete_objects([key])
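The abstract interface above can be exercised with a very small concrete backend. The sketch below is illustrative only: the in-memory dictionary storage, the class name InMemoryRepositoryBackend and the use of SHA-256 content hashes as keys are assumptions, not part of AiiDA; only the abstract methods and the base-class helpers shown above are taken as given.
# A minimal sketch of a concrete backend, assuming only the interface defined above.
# The in-memory dict storage and SHA-256 keys are illustrative choices, not part of AiiDA.
import contextlib
import hashlib
import io
import uuid as uuid_module
from typing import BinaryIO, Iterable, Iterator, List, Optional, Tuple


class InMemoryRepositoryBackend(AbstractRepositoryBackend):
    """Toy backend that keeps objects in a dictionary, keyed by the SHA-256 hash of their content."""

    def __init__(self) -> None:
        self._objects = {}
        self._uuid = str(uuid_module.uuid4())

    @property
    def uuid(self) -> Optional[str]:
        return self._uuid

    @property
    def key_format(self) -> Optional[str]:
        return 'sha256'

    def initialise(self, **kwargs) -> None:
        """Nothing to initialise for an in-memory store."""

    @property
    def is_initialised(self) -> bool:
        return True

    def erase(self) -> None:
        self._objects.clear()

    def _put_object_from_filelike(self, handle: BinaryIO) -> str:
        content = handle.read()
        key = hashlib.sha256(content).hexdigest()
        self._objects[key] = content
        return key

    def has_objects(self, keys: List[str]) -> List[bool]:
        return [key in self._objects for key in keys]

    def list_objects(self) -> Iterable[str]:
        return sorted(self._objects)

    @contextlib.contextmanager
    def open(self, key: str) -> Iterator[BinaryIO]:
        # The base-class helpers `get_object_content` and `get_object_hash` go through this override.
        if not self.has_object(key):
            raise FileNotFoundError(f'object with key `{key}` does not exist.')
        yield io.BytesIO(self._objects[key])

    def iter_object_streams(self, keys: List[str]) -> Iterator[Tuple[str, BinaryIO]]:
        for key in keys:
            with self.open(key) as handle:
                yield key, handle

    def delete_objects(self, keys: List[str]) -> None:
        super().delete_objects(keys)  # the base implementation validates that every key exists
        for key in keys:
            del self._objects[key]


# Round trip through the generic helpers provided by the abstract base class.
backend = InMemoryRepositoryBackend()
key = backend.put_object_from_filelike(io.BytesIO(b'some content'))
assert backend.get_object_content(key) == b'some content'
assert backend.get_object_hash(key) == hashlib.sha256(b'some content').hexdigest()
backend.delete_object(key)
assert not backend.has_object(key)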
|
__init__
|
Generative Adversarial Imitation Learning that accepts Image Obs
Most parameters are described in and passed to `AdversarialTrainer.__init__`.
Additional parameters that `CNNGAIL` adds on top of its superclass initializer are
as follows:
Args:
discrim_kwargs: Optional keyword arguments to use while constructing the
DiscrimNetGAIL.
|
import logging
from typing import Iterable, Mapping, Optional, Union
import gym
import numpy as np
import torch as th
from stable_baselines3.common import on_policy_algorithm, vec_env
from imitation.data import types
from imitation.rewards import discrim_nets
from imitation.algorithms.adversarial import AdversarialTrainer
from .cnn_discriminator import ActObsCNN
class CNNGAIL(AdversarialTrainer):
# MASKED: __init__ function (lines 18-53)
|
def __init__(
self,
venv: vec_env.VecEnv,
expert_data: Union[Iterable[Mapping], types.Transitions],
expert_batch_size: int,
gen_algo: on_policy_algorithm.OnPolicyAlgorithm,
discrim=None,
*,
discrim_kwargs: Optional[Mapping] = None,
**kwargs,
):
"""Generative Adversarial Imitation Learning that accepts Image Obs
Most parameters are described in and passed to `AdversarialTrainer.__init__`.
Additional parameters that `CNNGAIL` adds on top of its superclass initializer are
as follows:
Args:
discrim_kwargs: Optional keyword arguments to use while constructing the
DiscrimNetGAIL.
"""
discrim_kwargs = discrim_kwargs or {}
if discrim is None:
discrim = discrim_nets.DiscrimNetGAIL(
venv.observation_space,
venv.action_space,
discrim_net=ActObsCNN,
**discrim_kwargs,
)
logging.info("using CNN GAIL")
super().__init__(
venv, gen_algo, discrim, expert_data, expert_batch_size, **kwargs
)
| 18 | 53 |
import logging
from typing import Iterable, Mapping, Optional, Union
import gym
import numpy as np
import torch as th
from stable_baselines3.common import on_policy_algorithm, vec_env
from imitation.data import types
from imitation.rewards import discrim_nets
from imitation.algorithms.adversarial import AdversarialTrainer
from .cnn_discriminator import ActObsCNN
class CNNGAIL(AdversarialTrainer):
def __init__(
self,
venv: vec_env.VecEnv,
expert_data: Union[Iterable[Mapping], types.Transitions],
expert_batch_size: int,
gen_algo: on_policy_algorithm.OnPolicyAlgorithm,
discrim=None,
*,
discrim_kwargs: Optional[Mapping] = None,
**kwargs,
):
"""Generative Adversarial Imitation Learning that accepts Image Obs
Most parameters are described in and passed to `AdversarialTrainer.__init__`.
Additional parameters that `CNNGAIL` adds on top of its superclass initializer are
as follows:
Args:
discrim_kwargs: Optional keyword arguments to use while constructing the
DiscrimNetGAIL.
"""
discrim_kwargs = discrim_kwargs or {}
if discrim is None:
discrim = discrim_nets.DiscrimNetGAIL(
venv.observation_space,
venv.action_space,
discrim_net=ActObsCNN,
**discrim_kwargs,
)
logging.info("using CNN GAIL")
super().__init__(
venv, gen_algo, discrim, expert_data, expert_batch_size, **kwargs
)
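A hedged usage sketch for the class above, relying only on the constructor signature shown. The environment id, the PPO hyperparameters, the expert_transitions variable and the train call inherited from AdversarialTrainer are assumptions for illustration.
import gym
from stable_baselines3 import PPO
from stable_baselines3.common.vec_env import DummyVecEnv

# Image-observation environment wrapped as a VecEnv (the environment id is an assumption).
venv = DummyVecEnv([lambda: gym.make("PongNoFrameskip-v4")])

# Generator policy; a CNN policy is used because the observations are images.
gen_algo = PPO("CnnPolicy", venv, verbose=0)

# `expert_transitions` is assumed to be a `types.Transitions` object collected beforehand.
trainer = CNNGAIL(
    venv=venv,
    expert_data=expert_transitions,
    expert_batch_size=32,
    gen_algo=gen_algo,
)

# `train` is assumed to be provided by the `AdversarialTrainer` superclass.
trainer.train(total_timesteps=100_000)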
|
workspace
|
Workspace Factory Fixture.
Yields:
directory(Workspace): Workspace Created.
|
""" Orlov Module : workspace module fixture. """
import os
import logging
import pytest
from orlov.libs.workspace import Workspace
logger = logging.getLogger(__name__)
# MASKED: workspace function (lines 11-29)
|
@pytest.fixture(scope='session')
def workspace(request) -> Workspace:
""" Workspace Factory Fixture.
Yields:
directory(Workspace): Workspace Created.
"""
logger.debug('Setup of test structure.')
# create screenshot directory
if request.config.getoption('workspace'):
result_dir = request.config.getoption('workspace')
else:
if not os.path.exists('result'):
logger.debug('Creating results folder to store results')
os.mkdir('result')
result_dir = os.path.join(os.getcwd(), 'result')
logger.debug('Created folder %s', result_dir)
yield Workspace(result_dir)
| 11 | 29 |
""" Orlov Module : workspace module fixture. """
import os
import logging
import pytest
from orlov.libs.workspace import Workspace
logger = logging.getLogger(__name__)
@pytest.fixture(scope='session')
def workspace(request) -> Workspace:
""" Workspace Factory Fixture.
Yields:
directory(Workspace): Workspace Created.
"""
logger.debug('Setup of test structure.')
# create screenshot directory
if request.config.getoption('workspace'):
result_dir = request.config.getoption('workspace')
else:
if not os.path.exists('result'):
logger.debug('Creating results folder to store results')
os.mkdir('result')
result_dir = os.path.join(os.getcwd(), 'result')
logger.debug('Created folder %s', result_dir)
yield Workspace(result_dir)
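A hedged example of a test consuming the fixture above; the path attribute on Workspace is an assumption about the orlov API, used only for illustration.
import os


def test_workspace_directory_exists(workspace):
    # The session-scoped fixture yields a Workspace wrapping the result directory;
    # `workspace.path` is assumed (not verified here) to expose that directory.
    assert os.path.isdir(workspace.path)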
|
pytest_collection_modifyitems
|
Skip tests marked with '@pytest.mark.skip_for_nginx_oss' for Nginx OSS runs.
Skip tests marked with '@pytest.mark.appprotect' for non AP images.
:param config: pytest config
:param items: pytest collected test-items
:return:
|
"""Describe overall framework configuration."""
import os
import pytest
from kubernetes.config.kube_config import KUBE_CONFIG_DEFAULT_LOCATION
from settings import (
DEFAULT_IMAGE,
DEFAULT_PULL_POLICY,
DEFAULT_IC_TYPE,
DEFAULT_SERVICE,
DEFAULT_DEPLOYMENT_TYPE,
NUM_REPLICAS,
BATCH_START,
BATCH_RESOURCES,
)
from suite.resources_utils import get_first_pod_name
def pytest_addoption(parser) -> None:
"""Get cli-arguments.
:param parser: pytest parser
:return:
"""
parser.addoption(
"--context",
action="store",
default="",
help="The context to use in the kubeconfig file.",
)
parser.addoption(
"--image",
action="store",
default=DEFAULT_IMAGE,
help="The Ingress Controller image.",
)
parser.addoption(
"--image-pull-policy",
action="store",
default=DEFAULT_PULL_POLICY,
help="The pull policy of the Ingress Controller image.",
)
parser.addoption(
"--deployment-type",
action="store",
default=DEFAULT_DEPLOYMENT_TYPE,
help="The type of the IC deployment: deployment or daemon-set.",
)
parser.addoption(
"--ic-type",
action="store",
default=DEFAULT_IC_TYPE,
help="The type of the Ingress Controller: nginx-ingress or nginx-ingress-plus.",
)
parser.addoption(
"--service",
action="store",
default=DEFAULT_SERVICE,
help="The type of the Ingress Controller service: nodeport or loadbalancer.",
)
parser.addoption(
"--replicas",
action="store",
default=NUM_REPLICAS,
help="Number of replica pods for type deployment",
)
parser.addoption(
"--node-ip",
action="store",
help="The public IP of a cluster node. Not required if you use the loadbalancer service (see --service argument).",
)
parser.addoption(
"--kubeconfig",
action="store",
default=os.path.expanduser(KUBE_CONFIG_DEFAULT_LOCATION),
help="An absolute path to a kubeconfig file.",
)
parser.addoption(
"--show-ic-logs",
action="store",
default="no",
help="Show IC logs in stdout on test failure",
)
parser.addoption(
"--batch-start",
action="store",
default=BATCH_START,
help="Run tests for pods restarts with multiple resources deployed (Ingress/VS): True/False",
)
parser.addoption(
"--batch-resources",
action="store",
default=BATCH_RESOURCES,
help="Number of VS/Ingress resources to deploy",
)
# import fixtures into pytest global namespace
pytest_plugins = ["suite.fixtures"]
# MASKED: pytest_collection_modifyitems function (lines 103-131)
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item) -> None:
"""
Print out IC Pod logs on test failure.
Only look at actual failing test calls, not setup/teardown.
Only show the logs if commandline argument `--show-ic-logs` is set to 'yes'
:param item:
:return:
"""
# execute all other hooks to obtain the report object
outcome = yield
rep = outcome.get_result()
# we only look at actual failing test calls, not setup/teardown
if (
rep.when == "call"
and rep.failed
and item.config.getoption("--show-ic-logs") == "yes"
):
pod_namespace = item.funcargs["ingress_controller_prerequisites"].namespace
pod_name = get_first_pod_name(item.funcargs["kube_apis"].v1, pod_namespace)
print("\n===================== IC Logs Start =====================")
print(
item.funcargs["kube_apis"].v1.read_namespaced_pod_log(
pod_name, pod_namespace
)
)
print("\n===================== IC Logs End =====================")
|
def pytest_collection_modifyitems(config, items) -> None:
"""
Skip tests marked with '@pytest.mark.skip_for_nginx_oss' for Nginx OSS runs.
Skip tests marked with '@pytest.mark.appprotect' for non AP images.
:param config: pytest config
:param items: pytest collected test-items
:return:
"""
if config.getoption("--ic-type") == "nginx-ingress":
skip_for_nginx_oss = pytest.mark.skip(reason="Skip a test for Nginx OSS")
for item in items:
if "skip_for_nginx_oss" in item.keywords:
item.add_marker(skip_for_nginx_oss)
if config.getoption("--ic-type") == "nginx-plus-ingress":
skip_for_nginx_plus = pytest.mark.skip(reason="Skip a test for Nginx Plus")
for item in items:
if "skip_for_nginx_plus" in item.keywords:
item.add_marker(skip_for_nginx_plus)
if "-ap" not in config.getoption("--image"):
appprotect = pytest.mark.skip(reason="Skip AppProtect test in non-AP image")
for item in items:
if "appprotect" in item.keywords:
item.add_marker(appprotect)
if str(config.getoption("--batch-start")) != "True":
batch_start = pytest.mark.skip(reason="Skipping pod restart test with multiple resources")
for item in items:
if "batch_start" in item.keywords:
item.add_marker(batch_start)
| 103 | 131 |
"""Describe overall framework configuration."""
import os
import pytest
from kubernetes.config.kube_config import KUBE_CONFIG_DEFAULT_LOCATION
from settings import (
DEFAULT_IMAGE,
DEFAULT_PULL_POLICY,
DEFAULT_IC_TYPE,
DEFAULT_SERVICE,
DEFAULT_DEPLOYMENT_TYPE,
NUM_REPLICAS,
BATCH_START,
BATCH_RESOURCES,
)
from suite.resources_utils import get_first_pod_name
def pytest_addoption(parser) -> None:
"""Get cli-arguments.
:param parser: pytest parser
:return:
"""
parser.addoption(
"--context",
action="store",
default="",
help="The context to use in the kubeconfig file.",
)
parser.addoption(
"--image",
action="store",
default=DEFAULT_IMAGE,
help="The Ingress Controller image.",
)
parser.addoption(
"--image-pull-policy",
action="store",
default=DEFAULT_PULL_POLICY,
help="The pull policy of the Ingress Controller image.",
)
parser.addoption(
"--deployment-type",
action="store",
default=DEFAULT_DEPLOYMENT_TYPE,
help="The type of the IC deployment: deployment or daemon-set.",
)
parser.addoption(
"--ic-type",
action="store",
default=DEFAULT_IC_TYPE,
help="The type of the Ingress Controller: nginx-ingress or nginx-ingress-plus.",
)
parser.addoption(
"--service",
action="store",
default=DEFAULT_SERVICE,
help="The type of the Ingress Controller service: nodeport or loadbalancer.",
)
parser.addoption(
"--replicas",
action="store",
default=NUM_REPLICAS,
help="Number of replica pods for type deployment",
)
parser.addoption(
"--node-ip",
action="store",
help="The public IP of a cluster node. Not required if you use the loadbalancer service (see --service argument).",
)
parser.addoption(
"--kubeconfig",
action="store",
default=os.path.expanduser(KUBE_CONFIG_DEFAULT_LOCATION),
help="An absolute path to a kubeconfig file.",
)
parser.addoption(
"--show-ic-logs",
action="store",
default="no",
help="Show IC logs in stdout on test failure",
)
parser.addoption(
"--batch-start",
action="store",
default=BATCH_START,
help="Run tests for pods restarts with multiple resources deployed (Ingress/VS): True/False",
)
parser.addoption(
"--batch-resources",
action="store",
default=BATCH_RESOURCES,
help="Number of VS/Ingress resources to deploy",
)
# import fixtures into pytest global namespace
pytest_plugins = ["suite.fixtures"]
def pytest_collection_modifyitems(config, items) -> None:
"""
Skip tests marked with '@pytest.mark.skip_for_nginx_oss' for Nginx OSS runs.
Skip tests marked with '@pytest.mark.appprotect' for non AP images.
:param config: pytest config
:param items: pytest collected test-items
:return:
"""
if config.getoption("--ic-type") == "nginx-ingress":
skip_for_nginx_oss = pytest.mark.skip(reason="Skip a test for Nginx OSS")
for item in items:
if "skip_for_nginx_oss" in item.keywords:
item.add_marker(skip_for_nginx_oss)
if config.getoption("--ic-type") == "nginx-plus-ingress":
skip_for_nginx_plus = pytest.mark.skip(reason="Skip a test for Nginx Plus")
for item in items:
if "skip_for_nginx_plus" in item.keywords:
item.add_marker(skip_for_nginx_plus)
if "-ap" not in config.getoption("--image"):
appprotect = pytest.mark.skip(reason="Skip AppProtect test in non-AP image")
for item in items:
if "appprotect" in item.keywords:
item.add_marker(appprotect)
if str(config.getoption("--batch-start")) != "True":
batch_start = pytest.mark.skip(reason="Skipping pod restart test with multiple resources")
for item in items:
if "batch_start" in item.keywords:
item.add_marker(batch_start)
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item) -> None:
"""
Print out IC Pod logs on test failure.
Only look at actual failing test calls, not setup/teardown.
Only show the logs if commandline argument `--show-ic-logs` is set to 'yes'
:param item:
:return:
"""
# execute all other hooks to obtain the report object
outcome = yield
rep = outcome.get_result()
# we only look at actual failing test calls, not setup/teardown
if (
rep.when == "call"
and rep.failed
and item.config.getoption("--show-ic-logs") == "yes"
):
pod_namespace = item.funcargs["ingress_controller_prerequisites"].namespace
pod_name = get_first_pod_name(item.funcargs["kube_apis"].v1, pod_namespace)
print("\n===================== IC Logs Start =====================")
print(
item.funcargs["kube_apis"].v1.read_namespaced_pod_log(
pod_name, pod_namespace
)
)
print("\n===================== IC Logs End =====================")
|
pytest_runtest_makereport
|
Print out IC Pod logs on test failure.
Only look at actual failing test calls, not setup/teardown.
Only show the logs if commandline argument `--show-ic-logs` is set to 'yes'
:param item:
:return:
|
"""Describe overall framework configuration."""
import os
import pytest
from kubernetes.config.kube_config import KUBE_CONFIG_DEFAULT_LOCATION
from settings import (
DEFAULT_IMAGE,
DEFAULT_PULL_POLICY,
DEFAULT_IC_TYPE,
DEFAULT_SERVICE,
DEFAULT_DEPLOYMENT_TYPE,
NUM_REPLICAS,
BATCH_START,
BATCH_RESOURCES,
)
from suite.resources_utils import get_first_pod_name
def pytest_addoption(parser) -> None:
"""Get cli-arguments.
:param parser: pytest parser
:return:
"""
parser.addoption(
"--context",
action="store",
default="",
help="The context to use in the kubeconfig file.",
)
parser.addoption(
"--image",
action="store",
default=DEFAULT_IMAGE,
help="The Ingress Controller image.",
)
parser.addoption(
"--image-pull-policy",
action="store",
default=DEFAULT_PULL_POLICY,
help="The pull policy of the Ingress Controller image.",
)
parser.addoption(
"--deployment-type",
action="store",
default=DEFAULT_DEPLOYMENT_TYPE,
help="The type of the IC deployment: deployment or daemon-set.",
)
parser.addoption(
"--ic-type",
action="store",
default=DEFAULT_IC_TYPE,
help="The type of the Ingress Controller: nginx-ingress or nginx-ingress-plus.",
)
parser.addoption(
"--service",
action="store",
default=DEFAULT_SERVICE,
help="The type of the Ingress Controller service: nodeport or loadbalancer.",
)
parser.addoption(
"--replicas",
action="store",
default=NUM_REPLICAS,
help="Number of replica pods for type deployment",
)
parser.addoption(
"--node-ip",
action="store",
help="The public IP of a cluster node. Not required if you use the loadbalancer service (see --service argument).",
)
parser.addoption(
"--kubeconfig",
action="store",
default=os.path.expanduser(KUBE_CONFIG_DEFAULT_LOCATION),
help="An absolute path to a kubeconfig file.",
)
parser.addoption(
"--show-ic-logs",
action="store",
default="no",
help="Show IC logs in stdout on test failure",
)
parser.addoption(
"--batch-start",
action="store",
default=BATCH_START,
help="Run tests for pods restarts with multiple resources deployed (Ingress/VS): True/False",
)
parser.addoption(
"--batch-resources",
action="store",
default=BATCH_RESOURCES,
help="Number of VS/Ingress resources to deploy",
)
# import fixtures into pytest global namespace
pytest_plugins = ["suite.fixtures"]
def pytest_collection_modifyitems(config, items) -> None:
"""
Skip tests marked with '@pytest.mark.skip_for_nginx_oss' for Nginx OSS runs.
Skip tests marked with '@pytest.mark.appprotect' for non AP images.
:param config: pytest config
:param items: pytest collected test-items
:return:
"""
if config.getoption("--ic-type") == "nginx-ingress":
skip_for_nginx_oss = pytest.mark.skip(reason="Skip a test for Nginx OSS")
for item in items:
if "skip_for_nginx_oss" in item.keywords:
item.add_marker(skip_for_nginx_oss)
if config.getoption("--ic-type") == "nginx-plus-ingress":
skip_for_nginx_plus = pytest.mark.skip(reason="Skip a test for Nginx Plus")
for item in items:
if "skip_for_nginx_plus" in item.keywords:
item.add_marker(skip_for_nginx_plus)
if "-ap" not in config.getoption("--image"):
appprotect = pytest.mark.skip(reason="Skip AppProtect test in non-AP image")
for item in items:
if "appprotect" in item.keywords:
item.add_marker(appprotect)
if str(config.getoption("--batch-start")) != "True":
batch_start = pytest.mark.skip(reason="Skipping pod restart test with multiple resources")
for item in items:
if "batch_start" in item.keywords:
item.add_marker(batch_start)
# MASKED: pytest_runtest_makereport function (lines 134-163)
|
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item) -> None:
"""
Print out IC Pod logs on test failure.
Only look at actual failing test calls, not setup/teardown.
Only show the logs if commandline argument `--show-ic-logs` is set to 'yes'
:param item:
:return:
"""
# execute all other hooks to obtain the report object
outcome = yield
rep = outcome.get_result()
# we only look at actual failing test calls, not setup/teardown
if (
rep.when == "call"
and rep.failed
and item.config.getoption("--show-ic-logs") == "yes"
):
pod_namespace = item.funcargs["ingress_controller_prerequisites"].namespace
pod_name = get_first_pod_name(item.funcargs["kube_apis"].v1, pod_namespace)
print("\n===================== IC Logs Start =====================")
print(
item.funcargs["kube_apis"].v1.read_namespaced_pod_log(
pod_name, pod_namespace
)
)
print("\n===================== IC Logs End =====================")
| 134 | 163 |
"""Describe overall framework configuration."""
import os
import pytest
from kubernetes.config.kube_config import KUBE_CONFIG_DEFAULT_LOCATION
from settings import (
DEFAULT_IMAGE,
DEFAULT_PULL_POLICY,
DEFAULT_IC_TYPE,
DEFAULT_SERVICE,
DEFAULT_DEPLOYMENT_TYPE,
NUM_REPLICAS,
BATCH_START,
BATCH_RESOURCES,
)
from suite.resources_utils import get_first_pod_name
def pytest_addoption(parser) -> None:
"""Get cli-arguments.
:param parser: pytest parser
:return:
"""
parser.addoption(
"--context",
action="store",
default="",
help="The context to use in the kubeconfig file.",
)
parser.addoption(
"--image",
action="store",
default=DEFAULT_IMAGE,
help="The Ingress Controller image.",
)
parser.addoption(
"--image-pull-policy",
action="store",
default=DEFAULT_PULL_POLICY,
help="The pull policy of the Ingress Controller image.",
)
parser.addoption(
"--deployment-type",
action="store",
default=DEFAULT_DEPLOYMENT_TYPE,
help="The type of the IC deployment: deployment or daemon-set.",
)
parser.addoption(
"--ic-type",
action="store",
default=DEFAULT_IC_TYPE,
help="The type of the Ingress Controller: nginx-ingress or nginx-ingress-plus.",
)
parser.addoption(
"--service",
action="store",
default=DEFAULT_SERVICE,
help="The type of the Ingress Controller service: nodeport or loadbalancer.",
)
parser.addoption(
"--replicas",
action="store",
default=NUM_REPLICAS,
help="Number of replica pods for type deployment",
)
parser.addoption(
"--node-ip",
action="store",
help="The public IP of a cluster node. Not required if you use the loadbalancer service (see --service argument).",
)
parser.addoption(
"--kubeconfig",
action="store",
default=os.path.expanduser(KUBE_CONFIG_DEFAULT_LOCATION),
help="An absolute path to a kubeconfig file.",
)
parser.addoption(
"--show-ic-logs",
action="store",
default="no",
help="Show IC logs in stdout on test failure",
)
parser.addoption(
"--batch-start",
action="store",
default=BATCH_START,
help="Run tests for pods restarts with multiple resources deployed (Ingress/VS): True/False",
)
parser.addoption(
"--batch-resources",
action="store",
default=BATCH_RESOURCES,
help="Number of VS/Ingress resources to deploy",
)
# import fixtures into pytest global namespace
pytest_plugins = ["suite.fixtures"]
def pytest_collection_modifyitems(config, items) -> None:
"""
Skip tests marked with '@pytest.mark.skip_for_nginx_oss' for Nginx OSS runs.
Skip tests marked with '@pytest.mark.appprotect' for non AP images.
:param config: pytest config
:param items: pytest collected test-items
:return:
"""
if config.getoption("--ic-type") == "nginx-ingress":
skip_for_nginx_oss = pytest.mark.skip(reason="Skip a test for Nginx OSS")
for item in items:
if "skip_for_nginx_oss" in item.keywords:
item.add_marker(skip_for_nginx_oss)
if config.getoption("--ic-type") == "nginx-plus-ingress":
skip_for_nginx_plus = pytest.mark.skip(reason="Skip a test for Nginx Plus")
for item in items:
if "skip_for_nginx_plus" in item.keywords:
item.add_marker(skip_for_nginx_plus)
if "-ap" not in config.getoption("--image"):
appprotect = pytest.mark.skip(reason="Skip AppProtect test in non-AP image")
for item in items:
if "appprotect" in item.keywords:
item.add_marker(appprotect)
if str(config.getoption("--batch-start")) != "True":
batch_start = pytest.mark.skip(reason="Skipping pod restart test with multiple resources")
for item in items:
if "batch_start" in item.keywords:
item.add_marker(batch_start)
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item) -> None:
"""
Print out IC Pod logs on test failure.
Only look at actual failing test calls, not setup/teardown.
Only show the logs if commandline argument `--show-ic-logs` is set to 'yes'
:param item:
:return:
"""
# execute all other hooks to obtain the report object
outcome = yield
rep = outcome.get_result()
# we only look at actual failing test calls, not setup/teardown
if (
rep.when == "call"
and rep.failed
and item.config.getoption("--show-ic-logs") == "yes"
):
pod_namespace = item.funcargs["ingress_controller_prerequisites"].namespace
pod_name = get_first_pod_name(item.funcargs["kube_apis"].v1, pod_namespace)
print("\n===================== IC Logs Start =====================")
print(
item.funcargs["kube_apis"].v1.read_namespaced_pod_log(
pod_name, pod_namespace
)
)
print("\n===================== IC Logs End =====================")
|
read_args_with_defaults
|
Look up parameters starting in the driver's private parameter space, but also searching outer namespaces.
Defining them in a higher namespace allows the axis_ptz.py script to share parameters with the driver.
|
#!/usr/bin/env python
"""
Axis camera video driver. Inspired by:
https://code.ros.org/svn/wg-ros-pkg/branches/trunk_cturtle/sandbox/axis_camera/axis.py
Communication with the camera is done using the Axis VAPIX API described at
http://www.axis.com/global/en/support/developer-support/vapix
.. note::
This is a major rewrite of the former ros-drivers/axis_camera node, so it contains a (deprecated) backwards
compatibility layer for the previous (non-released) API.
"""
import math
import re
import rospy
from sensor_msgs.msg import CompressedImage, CameraInfo
import camera_info_manager
import dynamic_reconfigure.server
from diagnostic_updater import Updater, DiagnosedPublisher, TimeStampStatusParam, FrequencyStatusParam, \
FunctionDiagnosticTask, DiagnosticStatusWrapper
from axis_camera.cfg import VideoStreamConfig
from axis_camera.srv import TakeSnapshot, TakeSnapshotResponse
from axis_camera.vapix import VAPIX
from axis_camera.video_streaming import ImageStreamingThread
from axis_camera.dynamic_reconfigure_tools import change_enum_items
# BACKWARDS COMPATIBILITY LAYER
StreamThread = ImageStreamingThread # deprecated
class Axis(rospy.SubscribeListener):
"""The ROS-VAPIX interface for video streaming."""
def __init__(self, hostname, username, password, width, height, frame_id, camera_info_url, use_encrypted_password,
camera_id=1, auto_wakeup_camera=True, compression=0, fps=24, use_color=True,
use_square_pixels=False):
"""Create the ROS-VAPIX interface.
:param hostname: Hostname of the camera (without http://, can be an IP address).
:type hostname: basestring
:param username: If login is needed, provide a username here.
:type username: :py:obj:`basestring` | None
:param password: If login is needed, provide a password here.
:type password: :py:obj:`basestring` | None
:param width: Width of the requested video stream in pixels (can be changed later). Must be one of the supported
resolutions. If `None`, the resolution will be chosen by height only. If also `height` is `None`,
then the default camera resolution will be used.
:type width: int|None
:param height: Height of the requested video stream in pixels (can be changed later). Must be one of the
supported resolutions. If `None`, the resolution will be chosen by width only. If also `width` is
`None`, then the default camera resolution will be used.
:type height: int|None
:param frame_id: The ROS TF frame assigned to the camera.
:type frame_id: basestring
:param camera_info_url: The URL pointing to the camera calibration, if available.
:type camera_info_url: basestring
:param use_encrypted_password: Whether to use Plain HTTP Auth (False) or Digest HTTP Auth (True).
:type use_encrypted_password: bool
:param camera_id: ID (number) of the camera. Can be 1 to 4.
:type camera_id: int
:param auto_wakeup_camera: If True, there will be a wakeup trial after first unsuccessful network command.
:type auto_wakeup_camera: bool
:param compression: Compression of the image (0 - no compression, 100 - max compression).
:type compression: int
:param fps: The desired frames per second.
:type fps: int
:param use_color: If True, send a color stream, otherwise send only grayscale image.
:type use_color: bool
:param use_square_pixels: If True, the resolution will be stretched to match 1:1 pixels.
By default, the pixels have a ratio of 11:12.
:type use_square_pixels: bool
:raises: :py:exc:`ValueError` if the requested resolution (either the `resolution`, or `width`+`height`)
is not supported.
"""
# True every time the video parameters have changed and the URL has to be altered (set from other threads).
self.video_params_changed = False
self.__initializing = True
self._hostname = hostname
self._camera_id = camera_id
self.diagnostic_updater = Updater()
self.diagnostic_updater.setHardwareID(hostname)
self._api = None
# autodetect the VAPIX API and connect to it; try it forever
while self._api is None and not rospy.is_shutdown():
try:
self._api = VAPIX.get_api_for_camera(hostname, username, password, camera_id, use_encrypted_password)
except (IOError, ValueError):
rospy.loginfo("Retrying connection to VAPIX on host %s, camera %d in 2 seconds." %
(hostname, camera_id))
rospy.sleep(2)
if rospy.is_shutdown():
return
self._allowed_resolutions = self._get_allowed_resolutions()
rospy.loginfo("The following resolutions are available for camera %d:\n%s" %
(camera_id, "\n".join([str(res) for res in self._allowed_resolutions])))
rospy.set_param("~allowed_resolutions", [res.get_vapix_representation() for res in self._allowed_resolutions])
# Sometimes the camera falls into power saving mode and stops streaming.
# This setting allows the script to try to wake up the camera.
self._auto_wakeup_camera = auto_wakeup_camera
# dynamic-reconfigurable properties - definitions
self._width = None # deprecated
self._height = None # deprecated
self._resolution = None
self._compression = None
self._fps = None
self._use_color = None
self._use_square_pixels = None
# treat empty strings as None in width and height params
width = width if width != "" else None
height = height if height != "" else None
# dynamic-reconfigurable properties - defaults
if width is None and height is None:
# TODO change to perform default resolution detection from VAPIX
self.set_resolution(self._allowed_resolutions[0])
else:
resolution = self.find_resolution_by_size(width, height)
self.set_resolution(resolution.get_vapix_representation())
self.set_compression(compression)
self.set_fps(fps)
self.set_use_color(use_color)
self.set_use_square_pixels(use_square_pixels)
# only advertise the supported resolutions on dynamic reconfigure
change_enum_items(
VideoStreamConfig,
"resolution",
[{
'name': res.name if isinstance(res, CIFVideoResolution) else str(res),
'value': res.get_vapix_representation(),
'description': str(res)
} for res in self._allowed_resolutions],
self._resolution.get_vapix_representation()
)
# dynamic reconfigure server
self._video_stream_param_change_server = dynamic_reconfigure.server.Server(VideoStreamConfig,
self.reconfigure_video)
# camera info setup
self._frame_id = frame_id
self._camera_info_url = camera_info_url
# generate a valid camera name based on the hostname
self._camera_name = camera_info_manager.genCameraName(self._hostname)
self._camera_info = camera_info_manager.CameraInfoManager(cname=self._camera_name, url=self._camera_info_url)
self._camera_info.loadCameraInfo() # required before getCameraInfo()
# the thread used for streaming images (is instantiated when the first image subscriber subscribes)
self._streaming_thread = None
# the publishers are started/stopped lazily in peer_subscribe/peer_unsubscribe
self._video_publisher_frequency_diagnostic = FrequencyStatusParam({'min': self._fps, 'max': self._fps})
self._video_publisher = PausableDiagnosedPublisher(
self,
rospy.Publisher("image_raw/compressed", CompressedImage, self, queue_size=100),
self.diagnostic_updater, self._video_publisher_frequency_diagnostic, TimeStampStatusParam()
)
self._camera_info_publisher = PausableDiagnosedPublisher(
self,
rospy.Publisher("camera_info", CameraInfo, self, queue_size=100),
self.diagnostic_updater, self._video_publisher_frequency_diagnostic, TimeStampStatusParam()
)
self._snapshot_server = rospy.Service("take_snapshot", TakeSnapshot, self.take_snapshot)
self.diagnostic_updater.add(FunctionDiagnosticTask("Camera parameters", self._camera_diagnostic_callback))
# BACKWARDS COMPATIBILITY LAYER
self.username = username # deprecated
self.password = password # deprecated
self.use_encrypted_password = use_encrypted_password # deprecated
self.st = None # deprecated
self.pub = self._video_publisher # deprecated
self.caminfo_pub = self._camera_info_publisher # deprecated
self.__initializing = False
def __str__(self):
(width, height) = self._resolution.get_resolution(self._use_square_pixels)
return 'Axis driver on host %s, camera %d (%dx%d px @ %d FPS)' % \
(self._hostname, self._api.camera_id, width, height, self._fps)
def peer_subscribe(self, topic_name, topic_publish, peer_publish):
"""Lazy-start the image-publisher."""
if self._streaming_thread is None:
self._streaming_thread = ImageStreamingThread(self)
self._streaming_thread.start()
else:
self._streaming_thread.resume()
def peer_unsubscribe(self, topic_name, num_peers):
"""Lazy-stop the image-publisher when nobody is interested"""
if num_peers == 0:
self._streaming_thread.pause()
def take_snapshot(self, request):
"""Retrieve a snapshot from the camera.
:param request: The service request.
:type request: :py:class:`axis_camera.srv.TakeSnapshotRequest`
:return: The response containing the image.
:rtype: :py:class:`axis_camera.srv.TakeSnapshotResponse`
:raises: :py:exc:`IOError`, :py:exc:`urllib2.URLError`
"""
image_data = self._api.take_snapshot()
image = CompressedImage()
image.header.stamp = rospy.Time.now()
image.header.frame_id = self._frame_id
image.format = "jpeg"
image.data = image_data
response = TakeSnapshotResponse()
response.image = image
return response
def reconfigure_video(self, config, level):
"""Dynamic reconfigure callback for video parameters.
:param config: The requested configuration.
:type config: dict
:param level: Unused here.
:type level: int
:return: The config corresponding to what was really achieved.
:rtype: dict
"""
if self.__initializing:
# in the initialization phase, we want to give precedence to the values given to the constructor
config.compression = self._compression
config.fps = self._fps
config.use_color = self._use_color
config.use_square_pixels = self._use_square_pixels
config.resolution = self._resolution.get_vapix_representation()
else:
self.__try_set_value_from_config(config, 'compression', self.set_compression)
self.__try_set_value_from_config(config, 'fps', self.set_fps)
self.__try_set_value_from_config(config, 'use_color', self.set_use_color)
self.__try_set_value_from_config(config, 'use_square_pixels', self.set_use_square_pixels)
try:
self.set_resolution(config.resolution)
except ValueError:
config.resolution = self._resolution.get_vapix_representation()
return config
def __try_set_value_from_config(self, config, field, setter):
"""First, try to call `setter(config[field])`, and if this call doesn't succeed. set the field in config to
its value stored in this class.
:param config: The dynamic reconfigure config dictionary.
:type config: dict
:param field: The field name (both in :py:obj:`config` and in :py:obj:`self`).
:type field: basestring
:param setter: The setter to use to set the value.
:type setter: lambda function
"""
try:
setter(config[field])
except ValueError:
config[field] = getattr(self, field)
#################################
# DYNAMIC RECONFIGURE CALLBACKS #
#################################
def set_resolution(self, resolution_value):
"""Request a new resolution for the video stream.
:param resolution_value: The string of type `width`x`height` or a :py:class:`VideoResolution` object.
:type resolution_value: basestring|VideoResolution
:raises: :py:exc:`ValueError` if the resolution is unknown/unsupported.
"""
resolution = None
if isinstance(resolution_value, VideoResolution):
resolution = resolution_value
elif isinstance(resolution_value, basestring):
resolution = self._get_resolution_from_param_value(resolution_value)
if resolution is None:
raise ValueError("Unsupported resolution type specified: %r" % resolution_value)
if self._resolution is None or resolution != self._resolution:
self._resolution = resolution
self.video_params_changed = True
# deprecated values
self._width = resolution.get_resolution(self._use_square_pixels)[0]
self._height = resolution.get_resolution(self._use_square_pixels)[1]
def _get_resolution_from_param_value(self, value):
"""Return a :py:class:`VideoResolution` object corresponding to the given video resolution param string.
:param value: Value of the resolution parameter to parse (of form `width`x`height`).
:type value: basestring
:return: The :py:class:`VideoResolution` corresponding to the given resolution param string.
:rtype: :py:class:`VideoResolution`
:raises: :py:exc:`ValueError` if the resolution is unknown/unsupported.
"""
for resolution in self._allowed_resolutions:
if resolution.get_vapix_representation() == value:
return resolution
raise ValueError("%s is not a valid valid resolution." % value)
def find_resolution_by_size(self, width, height):
"""Return a :py:class:`VideoResolution` object with the given dimensions.
If there are more resolutions with the same size, any of them may be returned.
:param width: Image width in pixels. If `None`, resolutions will be matched only by height.
:type width: int|None
:param height: Image height in pixels. If `None`, resolutions will be matched only by width.
:type height: int|None
:return: The corresponding resolution object.
:rtype: :py:class:`VideoResolution`
:raises: :py:exc:`ValueError` if no resolution with the given dimensions can be found.
:raises: :py:exc:`ValueError` if both `width` and `height` are None.
"""
if width is None and height is None:
raise ValueError("Either width or height of the desired resolution must be specified.")
for resolution in self._allowed_resolutions:
size = resolution.get_resolution(use_square_pixels=False)
if (width is None or width == size[0]) and (height is None or height == size[1]):
return resolution
size = resolution.get_resolution(use_square_pixels=True)
if (width is None or width == size[0]) and (height is None or height == size[1]):
return resolution
raise ValueError("Cannot find a supported resolution with dimensions %sx%s" % (width, height))
def _get_allowed_resolutions(self):
"""Return a list of resolutions supported both by the camera.
:return: The supported resolutions list.
:rtype: list of :py:class:`VideoResolution`
"""
camera_resolutions = self._get_resolutions_supported_by_camera()
return camera_resolutions
def _get_resolutions_supported_by_camera(self):
"""Return a list of resolutions supported the camera.
:return: The supported resolutions list.
:rtype: list of :py:class:`VideoResolution`
"""
try:
names = self._api.parse_list_parameter_value(self._api.get_parameter("Properties.Image.Resolution"))
return [VideoResolution.parse_from_vapix_param_value(name, self._api) for name in names]
except (IOError, ValueError):
rospy.logwarn("Could not determine resolutions supported by the camera. Assuming only CIF.")
return [CIFVideoResolution("CIF", 384, 288)]
def set_compression(self, compression):
"""Request the given compression level for the video stream.
:param compression: Compression of the image (0 - no compression, 100 - max compression).
:type compression: int
:raises: :py:exc:`ValueError` if the given compression level is outside the allowed range.
"""
if compression != self._compression:
self._compression = self.sanitize_compression(compression)
self.video_params_changed = True
@staticmethod
def sanitize_compression(compression):
"""Make sure the given value can be used as a compression level of the video stream.
:param compression: Compression of the image (0 - no compression, 100 - max compression).
:type compression: int
:return: The given compression converted to an int.
:rtype: int
:raises: :py:exc:`ValueError` if the given compression level is outside the allowed range.
"""
compression = int(compression)
if not (0 <= compression <= 100):
raise ValueError("%s is not a valid value for compression." % str(compression))
return compression
def set_fps(self, fps):
"""Request the given compression level for the video stream.
:param fps: The desired frames per second.
:type fps: int
:raises: :py:exc:`ValueError` if the given FPS is outside the allowed range.
"""
if fps != self._fps:
self._fps = self.sanitize_fps(fps)
self.video_params_changed = True
if hasattr(self, "_video_publisher_frequency_diagnostic"):
self._video_publisher_frequency_diagnostic.freq_bound['min'] = self._fps
self._video_publisher_frequency_diagnostic.freq_bound['max'] = self._fps
@staticmethod
def sanitize_fps(fps):
"""Make sure the given value can be used as FPS of the video stream.
:param fps: The desired frames per second.
:type fps: int
:return: The given FPS converted to an int.
:rtype: int
:raises: :py:exc:`ValueError` if the given FPS is outside the allowed range.
"""
fps = int(fps)
if not (1 <= fps <= 30):
raise ValueError("%s is not a valid value for FPS." % str(fps))
return fps
def set_use_color(self, use_color):
"""Request using/not using color in the video stream.
:param use_color: If True, send a color stream, otherwise send only grayscale image.
:type use_color: bool
:raises: :py:exc:`ValueError` if the given argument is not a bool.
"""
if use_color != self._use_color:
self._use_color = self.sanitize_bool(use_color, "use_color")
self.video_params_changed = True
def set_use_square_pixels(self, use_square_pixels):
"""Request using/not using square pixels.
:param use_square_pixels: If True, the resolution will be stretched to match 1:1 pixels.
By default, the pixels have a ratio of 11:12.
:type use_square_pixels: bool
:raises: :py:exc:`ValueError` if the given argument is not a bool.
"""
if use_square_pixels != self._use_square_pixels:
self._use_square_pixels = self.sanitize_bool(use_square_pixels, "use_square_pixels")
self.video_params_changed = True
@staticmethod
def sanitize_bool(value, field_name):
"""Convert the given value to a bool.
:param value: Either True, False, "1", "0", 1 or 0.
:type value: :py:class:`basestring` | :py:class:`bool` | :py:class:`int`
:param field_name: Name of the field this value belongs to (just for debug messages).
:type field_name: basestring
:return: The bool value of the given value.
:rtype: :py:class:`bool`
:raises: :py:exc:`ValueError` if the given value is not supported in this conversion.
"""
if value not in (True, False, "1", "0", 1, 0):
raise ValueError("%s is not a valid value for %s." % (str(value), field_name))
# bool("0") returns True because it is a nonempty string
if value == "0":
return False
return bool(value)
def _camera_diagnostic_callback(self, diag_message):
assert isinstance(diag_message, DiagnosticStatusWrapper)
diag_message.summary(DiagnosticStatusWrapper.OK, "Video parameters")
diag_message.add("FPS", self._fps)
diag_message.add("Resolution", self._resolution)
diag_message.add("Compression", self._compression)
diag_message.add("Color image", self._use_color)
diag_message.add("Square pixels used", self._use_square_pixels)
class VideoResolution(object):
"""A class representing a video resolution."""
def __init__(self, width, height):
"""Create a representation of the resolution.
:param width: Width of the resolution in pixels.
:type width: int
:param height: Height of the resolution in pixels.
:type height: int
"""
super(VideoResolution, self).__init__()
self.width = int(width)
self.height = int(height)
self.square_pixel_conversion_ratio_width = 12.0 / 11.0
self.square_pixel_conversion_ratio_height = 1
def __str__(self):
return "%dx%d" % (self.width, self.height)
def __repr__(self):
return "VideoResolution(width=%r,height=%r)" % (self.width, self.height)
def __eq__(self, other):
# compare by attribute values
return self.__dict__ == other.__dict__
def __ne__(self, other):
# reuse the former __eq__ definition
return not self == other
def get_resolution(self, use_square_pixels=False):
"""Get the image dimensions corresponding to this resolution.
:param use_square_pixels: Whether to stretch the resulting resolution to square pixels.
:type use_square_pixels: bool
:return: A tuple (width, height)
:rtype: tuple
"""
width = self.width
height = self.height
if use_square_pixels:
width = int(math.ceil(self.square_pixel_conversion_ratio_width * self.width))
height = int(math.ceil(self.square_pixel_conversion_ratio_height * self.height))
return width, height
def get_vapix_representation(self):
return "%dx%d" % (self.width, self.height)
@staticmethod
def parse_from_vapix_param_value(value, api):
assert isinstance(value, basestring)
assert isinstance(api, VAPIX)
numeric_regexp = re.compile(r"(\d+)x(\d+)")
match = numeric_regexp.match(value)
if match is not None:
return VideoResolution(int(match.group(1)), int(match.group(2)))
else: # resolution given by CIF name
name = value
width, height = api.resolve_video_resolution_name(name)
return CIFVideoResolution(name, width, height)
class CIFVideoResolution(VideoResolution):
"""A class representing a CIF standard resolution."""
def __init__(self, name, width, height):
"""Create a representation of a CIF resolution.
:param name: CIF standard name of the resolution.
:type name: basestring
:param width: Width of the resolution in pixels.
:type width: int
:param height: Height of the resolution in pixels.
:type height: int
"""
super(CIFVideoResolution, self).__init__(width, height)
self.name = name
def __str__(self):
return "%s (%dx%d)" % (self.name, self.width, self.height)
def __repr__(self):
return "CIFVideoResolution(name=%r,width=%r,height=%r)" % (self.name, self.width, self.height)
def main():
"""Start the ROS driver and ROS node."""
rospy.init_node("axis_driver")
arg_defaults = {
'hostname': '192.168.0.90', # default IP address
'username': None, # default login name
'password': None,
'width': 704,
'height': 576,
'frame_id': 'axis_camera',
'camera_info_url': '',
'use_encrypted_password': False,
'camera_id': 1,
'auto_wakeup_camera': True,
'compression': 0,
'fps': 24,
'use_color': True,
'use_square_pixels': False,
}
args = read_args_with_defaults(arg_defaults)
axis = Axis(**args)
rate = rospy.Rate(1)
while not rospy.is_shutdown():
axis.diagnostic_updater.update()
try:
rate.sleep()
except rospy.ROSTimeMovedBackwardsException:
rospy.logwarn("Detected jump back in time.")
class PausableDiagnosedPublisher(DiagnosedPublisher):
def __init__(self, axis, pub, diag, freq, stamp):
DiagnosedPublisher.__init__(self, pub, diag, freq, stamp)
self._axis = axis
def run(self, stat):
if self._axis._streaming_thread is None or self._axis._streaming_thread.is_paused():
stat.summary(DiagnosticStatusWrapper.OK, "Video not subscribed")
else:
stat = DiagnosedPublisher.run(self, stat)
return stat
# MASKED: read_args_with_defaults function (lines 628-647)
if __name__ == "__main__":
main()
|
def read_args_with_defaults(arg_defaults):
"""Look up parameters starting in the driver's private parameter space, but also searching outer namespaces.
Defining them in a higher namespace allows the axis_ptz.py script to share parameters with the driver."""
args = {}
for name, val in arg_defaults.iteritems():
full_name = rospy.search_param(name)
if full_name is None:
args[name] = val
else:
args[name] = rospy.get_param(full_name, val)
# resolve frame_id with tf_prefix (unless already absolute)
if args['frame_id'][0] != '/': # not absolute?
tf_prefix = rospy.search_param('tf_prefix')
prefix_val = ''
if tf_prefix is not None: # prefix defined?
prefix_val = rospy.get_param(tf_prefix)
if prefix_val[0] != '/': # prefix not absolute?
prefix_val = '/' + prefix_val
args['frame_id'] = prefix_val + '/' + args['frame_id']
return args
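A hedged illustration of the lookup order implemented above, assuming a running ROS master; the node name and parameter values are placeholders.
# rospy.search_param() walks up from the node's private namespace, so a parameter set in an
# outer namespace (for example one shared with axis_ptz.py) is still found by the driver.
rospy.init_node('axis_driver_param_demo')
rospy.set_param('hostname', '192.168.0.100')  # relative name, resolved in the node's namespace
args = read_args_with_defaults({'hostname': '192.168.0.90', 'frame_id': 'axis_camera'})
# 'hostname' resolves to the value set above, while 'frame_id' falls back to its default and
# is prefixed with tf_prefix (or '/') by the function.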
| 628 | 647 |
#!/usr/bin/env python
"""
Axis camera video driver. Inspired by:
https://code.ros.org/svn/wg-ros-pkg/branches/trunk_cturtle/sandbox/axis_camera/axis.py
Communication with the camera is done using the Axis VAPIX API described at
http://www.axis.com/global/en/support/developer-support/vapix
.. note::
This is a major rewrite of the former ros-drivers/axis_camera node, so it contains a (deprecated) backwards
compatibility layer for the previous (non-released) API.
"""
import math
import re
import rospy
from sensor_msgs.msg import CompressedImage, CameraInfo
import camera_info_manager
import dynamic_reconfigure.server
from diagnostic_updater import Updater, DiagnosedPublisher, TimeStampStatusParam, FrequencyStatusParam, \
FunctionDiagnosticTask, DiagnosticStatusWrapper
from axis_camera.cfg import VideoStreamConfig
from axis_camera.srv import TakeSnapshot, TakeSnapshotResponse
from axis_camera.vapix import VAPIX
from axis_camera.video_streaming import ImageStreamingThread
from axis_camera.dynamic_reconfigure_tools import change_enum_items
# BACKWARDS COMPATIBILITY LAYER
StreamThread = ImageStreamingThread # deprecated
class Axis(rospy.SubscribeListener):
"""The ROS-VAPIX interface for video streaming."""
def __init__(self, hostname, username, password, width, height, frame_id, camera_info_url, use_encrypted_password,
camera_id=1, auto_wakeup_camera=True, compression=0, fps=24, use_color=True,
use_square_pixels=False):
"""Create the ROS-VAPIX interface.
:param hostname: Hostname of the camera (without http://, can be an IP address).
:type hostname: basestring
:param username: If login is needed, provide a username here.
:type username: :py:obj:`basestring` | None
:param password: If login is needed, provide a password here.
:type password: :py:obj:`basestring` | None
:param width: Width of the requested video stream in pixels (can be changed later). Must be one of the supported
resolutions. If `None`, the resolution will be chosen by height only. If also `height` is `None`,
then the default camera resolution will be used.
:type width: int|None
:param height: Height of the requested video stream in pixels (can be changed later). Must be one of the
supported resolutions. If `None`, the resolution will be chosen by width only. If also `width` is
`None`, then the default camera resolution will be used.
:type height: int|None
:param frame_id: The ROS TF frame assigned to the camera.
:type frame_id: basestring
:param camera_info_url: The URL pointing to the camera calibration, if available.
:type camera_info_url: basestring
:param use_encrypted_password: Whether to use Plain HTTP Auth (False) or Digest HTTP Auth (True).
:type use_encrypted_password: bool
:param camera_id: ID (number) of the camera. Can be 1 to 4.
:type camera_id: int
:param auto_wakeup_camera: If True, there will be a wakeup trial after first unsuccessful network command.
:type auto_wakeup_camera: bool
:param compression: Compression of the image (0 - no compression, 100 - max compression).
:type compression: int
:param fps: The desired frames per second.
:type fps: int
:param use_color: If True, send a color stream, otherwise send only grayscale image.
:type use_color: bool
:param use_square_pixels: If True, the resolution will be stretched to match 1:1 pixels.
By default, the pixels have a ratio of 11:12.
:type use_square_pixels: bool
        :raises: :py:exc:`ValueError` if the requested resolution (either the `resolution`, or `width`+`height`)
is not supported.
"""
# True every time the video parameters have changed and the URL has to be altered (set from other threads).
self.video_params_changed = False
self.__initializing = True
self._hostname = hostname
self._camera_id = camera_id
self.diagnostic_updater = Updater()
self.diagnostic_updater.setHardwareID(hostname)
self._api = None
# autodetect the VAPIX API and connect to it; try it forever
while self._api is None and not rospy.is_shutdown():
try:
self._api = VAPIX.get_api_for_camera(hostname, username, password, camera_id, use_encrypted_password)
except (IOError, ValueError):
rospy.loginfo("Retrying connection to VAPIX on host %s, camera %d in 2 seconds." %
(hostname, camera_id))
rospy.sleep(2)
if rospy.is_shutdown():
return
self._allowed_resolutions = self._get_allowed_resolutions()
rospy.loginfo("The following resolutions are available for camera %d:\n%s" %
(camera_id, "\n".join([str(res) for res in self._allowed_resolutions])))
rospy.set_param("~allowed_resolutions", [res.get_vapix_representation() for res in self._allowed_resolutions])
# Sometimes the camera falls into power saving mode and stops streaming.
# This setting allows the script to try to wake up the camera.
self._auto_wakeup_camera = auto_wakeup_camera
# dynamic-reconfigurable properties - definitions
self._width = None # deprecated
self._height = None # deprecated
self._resolution = None
self._compression = None
self._fps = None
self._use_color = None
self._use_square_pixels = None
# treat empty strings as None in width and height params
width = width if width != "" else None
height = height if height != "" else None
# dynamic-reconfigurable properties - defaults
if width is None and height is None:
# TODO change to perform default resolution detection from VAPIX
self.set_resolution(self._allowed_resolutions[0])
else:
resolution = self.find_resolution_by_size(width, height)
self.set_resolution(resolution.get_vapix_representation())
self.set_compression(compression)
self.set_fps(fps)
self.set_use_color(use_color)
self.set_use_square_pixels(use_square_pixels)
# only advertise the supported resolutions on dynamic reconfigure
change_enum_items(
VideoStreamConfig,
"resolution",
[{
'name': res.name if isinstance(res, CIFVideoResolution) else str(res),
'value': res.get_vapix_representation(),
'description': str(res)
} for res in self._allowed_resolutions],
self._resolution.get_vapix_representation()
)
# dynamic reconfigure server
self._video_stream_param_change_server = dynamic_reconfigure.server.Server(VideoStreamConfig,
self.reconfigure_video)
# camera info setup
self._frame_id = frame_id
self._camera_info_url = camera_info_url
# generate a valid camera name based on the hostname
self._camera_name = camera_info_manager.genCameraName(self._hostname)
self._camera_info = camera_info_manager.CameraInfoManager(cname=self._camera_name, url=self._camera_info_url)
self._camera_info.loadCameraInfo() # required before getCameraInfo()
# the thread used for streaming images (is instantiated when the first image subscriber subscribes)
self._streaming_thread = None
# the publishers are started/stopped lazily in peer_subscribe/peer_unsubscribe
self._video_publisher_frequency_diagnostic = FrequencyStatusParam({'min': self._fps, 'max': self._fps})
self._video_publisher = PausableDiagnosedPublisher(
self,
rospy.Publisher("image_raw/compressed", CompressedImage, self, queue_size=100),
self.diagnostic_updater, self._video_publisher_frequency_diagnostic, TimeStampStatusParam()
)
self._camera_info_publisher = PausableDiagnosedPublisher(
self,
rospy.Publisher("camera_info", CameraInfo, self, queue_size=100),
self.diagnostic_updater, self._video_publisher_frequency_diagnostic, TimeStampStatusParam()
)
self._snapshot_server = rospy.Service("take_snapshot", TakeSnapshot, self.take_snapshot)
self.diagnostic_updater.add(FunctionDiagnosticTask("Camera parameters", self._camera_diagnostic_callback))
# BACKWARDS COMPATIBILITY LAYER
self.username = username # deprecated
self.password = password # deprecated
self.use_encrypted_password = use_encrypted_password # deprecated
self.st = None # deprecated
self.pub = self._video_publisher # deprecated
self.caminfo_pub = self._camera_info_publisher # deprecated
self.__initializing = False
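    # Illustrative construction sketch (not part of the original driver; the values
    # below are assumptions chosen only for the example). Note that the constructor
    # keeps retrying the VAPIX connection until it succeeds or ROS shuts down.
    #
    #   axis = Axis(hostname="192.168.0.90", username=None, password=None,
    #               width=704, height=576, frame_id="axis_camera",
    #               camera_info_url="", use_encrypted_password=False, fps=15)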
def __str__(self):
(width, height) = self._resolution.get_resolution(self._use_square_pixels)
return 'Axis driver on host %s, camera %d (%dx%d px @ %d FPS)' % \
(self._hostname, self._api.camera_id, width, height, self._fps)
def peer_subscribe(self, topic_name, topic_publish, peer_publish):
"""Lazy-start the image-publisher."""
if self._streaming_thread is None:
self._streaming_thread = ImageStreamingThread(self)
self._streaming_thread.start()
else:
self._streaming_thread.resume()
def peer_unsubscribe(self, topic_name, num_peers):
"""Lazy-stop the image-publisher when nobody is interested"""
if num_peers == 0:
self._streaming_thread.pause()
def take_snapshot(self, request):
"""Retrieve a snapshot from the camera.
:param request: The service request.
:type request: :py:class:`axis_camera.srv.TakeSnapshotRequest`
:return: The response containing the image.
:rtype: :py:class:`axis_camera.srv.TakeSnapshotResponse`
:raises: :py:exc:`IOError`, :py:exc:`urllib2.URLError`
"""
image_data = self._api.take_snapshot()
image = CompressedImage()
image.header.stamp = rospy.Time.now()
image.header.frame_id = self._frame_id
image.format = "jpeg"
image.data = image_data
response = TakeSnapshotResponse()
response.image = image
return response
def reconfigure_video(self, config, level):
"""Dynamic reconfigure callback for video parameters.
:param config: The requested configuration.
:type config: dict
:param level: Unused here.
:type level: int
:return: The config corresponding to what was really achieved.
:rtype: dict
"""
if self.__initializing:
# in the initialization phase, we want to give precedence to the values given to the constructor
config.compression = self._compression
config.fps = self._fps
config.use_color = self._use_color
config.use_square_pixels = self._use_square_pixels
config.resolution = self._resolution.get_vapix_representation()
else:
self.__try_set_value_from_config(config, 'compression', self.set_compression)
self.__try_set_value_from_config(config, 'fps', self.set_fps)
self.__try_set_value_from_config(config, 'use_color', self.set_use_color)
self.__try_set_value_from_config(config, 'use_square_pixels', self.set_use_square_pixels)
try:
self.set_resolution(config.resolution)
except ValueError:
config.resolution = self._resolution.get_vapix_representation()
return config
def __try_set_value_from_config(self, config, field, setter):
"""First, try to call `setter(config[field])`, and if this call doesn't succeed. set the field in config to
its value stored in this class.
:param config: The dynamic reconfigure config dictionary.
:type config: dict
:param field: The field name (both in :py:obj:`config` and in :py:obj:`self`).
:type field: basestring
:param setter: The setter to use to set the value.
:type setter: lambda function
"""
try:
setter(config[field])
except ValueError:
            config[field] = getattr(self, '_' + field)  # the stored values use a leading-underscore attribute name
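    # Sketch of the write-back behaviour above (values assumed for illustration):
    # if a reconfigure request asks for fps=120, set_fps() raises ValueError, the
    # driver keeps its current frame rate (say 24) and writes that back into the
    # config, so the client sees what was actually achieved:
    #
    #   config = {'fps': 120}
    #   self.__try_set_value_from_config(config, 'fps', self.set_fps)
    #   # config['fps'] is now 24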
#################################
# DYNAMIC RECONFIGURE CALLBACKS #
#################################
def set_resolution(self, resolution_value):
"""Request a new resolution for the video stream.
        :param resolution_value: A string of the form `width`x`height`, or a :py:class:`VideoResolution` object.
:type resolution_value: basestring|VideoResolution
:raises: :py:exc:`ValueError` if the resolution is unknown/unsupported.
"""
resolution = None
if isinstance(resolution_value, VideoResolution):
resolution = resolution_value
elif isinstance(resolution_value, basestring):
resolution = self._get_resolution_from_param_value(resolution_value)
if resolution is None:
raise ValueError("Unsupported resolution type specified: %r" % resolution_value)
if self._resolution is None or resolution != self._resolution:
self._resolution = resolution
self.video_params_changed = True
# deprecated values
self._width = resolution.get_resolution(self._use_square_pixels)[0]
self._height = resolution.get_resolution(self._use_square_pixels)[1]
def _get_resolution_from_param_value(self, value):
"""Return a :py:class:`VideoResolution` object corresponding to the given video resolution param string.
:param value: Value of the resolution parameter to parse (of form `width`x`height`).
:type value: basestring
:return: The :py:class:`VideoResolution` corresponding to the given resolution param string.
:rtype: :py:class:`VideoResolution`
:raises: :py:exc:`ValueError` if the resolution is unknown/unsupported.
"""
for resolution in self._allowed_resolutions:
if resolution.get_vapix_representation() == value:
return resolution
raise ValueError("%s is not a valid valid resolution." % value)
def find_resolution_by_size(self, width, height):
"""Return a :py:class:`VideoResolution` object with the given dimensions.
        If there are multiple resolutions with the same size, any of them may be returned.
:param width: Image width in pixels. If `None`, resolutions will be matched only by height.
:type width: int|None
:param height: Image height in pixels. If `None`, resolutions will be matched only by width.
:type height: int|None
:return: The corresponding resolution object.
:rtype: :py:class:`VideoResolution`
:raises: :py:exc:`ValueError` if no resolution with the given dimensions can be found.
:raises: :py:exc:`ValueError` if both `width` and `height` are None.
"""
if width is None and height is None:
raise ValueError("Either width or height of the desired resolution must be specified.")
for resolution in self._allowed_resolutions:
size = resolution.get_resolution(use_square_pixels=False)
if (width is None or width == size[0]) and (height is None or height == size[1]):
return resolution
size = resolution.get_resolution(use_square_pixels=True)
if (width is None or width == size[0]) and (height is None or height == size[1]):
return resolution
raise ValueError("Cannot find a supported resolution with dimensions %sx%s" % (width, height))
def _get_allowed_resolutions(self):
"""Return a list of resolutions supported both by the camera.
:return: The supported resolutions list.
:rtype: list of :py:class:`VideoResolution`
"""
camera_resolutions = self._get_resolutions_supported_by_camera()
return camera_resolutions
def _get_resolutions_supported_by_camera(self):
"""Return a list of resolutions supported the camera.
:return: The supported resolutions list.
:rtype: list of :py:class:`VideoResolution`
"""
try:
names = self._api.parse_list_parameter_value(self._api.get_parameter("Properties.Image.Resolution"))
return [VideoResolution.parse_from_vapix_param_value(name, self._api) for name in names]
except (IOError, ValueError):
rospy.logwarn("Could not determine resolutions supported by the camera. Asssuming only CIF.")
return [CIFVideoResolution("CIF", 384, 288)]
def set_compression(self, compression):
"""Request the given compression level for the video stream.
:param compression: Compression of the image (0 - no compression, 100 - max compression).
:type compression: int
:raises: :py:exc:`ValueError` if the given compression level is outside the allowed range.
"""
if compression != self._compression:
self._compression = self.sanitize_compression(compression)
self.video_params_changed = True
@staticmethod
def sanitize_compression(compression):
"""Make sure the given value can be used as a compression level of the video stream.
:param compression: Compression of the image (0 - no compression, 100 - max compression).
:type compression: int
:return: The given compression converted to an int.
:rtype: int
:raises: :py:exc:`ValueError` if the given compression level is outside the allowed range.
"""
compression = int(compression)
if not (0 <= compression <= 100):
raise ValueError("%s is not a valid value for compression." % str(compression))
return compression
def set_fps(self, fps):
"""Request the given compression level for the video stream.
:param fps: The desired frames per second.
:type fps: int
:raises: :py:exc:`ValueError` if the given FPS is outside the allowed range.
"""
if fps != self._fps:
self._fps = self.sanitize_fps(fps)
self.video_params_changed = True
if hasattr(self, "_video_publisher_frequency_diagnostic"):
self._video_publisher_frequency_diagnostic.freq_bound['min'] = self._fps
self._video_publisher_frequency_diagnostic.freq_bound['max'] = self._fps
@staticmethod
def sanitize_fps(fps):
"""Make sure the given value can be used as FPS of the video stream.
:param fps: The desired frames per second.
:type fps: int
:return: The given FPS converted to an int.
:rtype: int
:raises: :py:exc:`ValueError` if the given FPS is outside the allowed range.
"""
fps = int(fps)
if not (1 <= fps <= 30):
raise ValueError("%s is not a valid value for FPS." % str(fps))
return fps
def set_use_color(self, use_color):
"""Request using/not using color in the video stream.
:param use_color: If True, send a color stream, otherwise send only grayscale image.
:type use_color: bool
:raises: :py:exc:`ValueError` if the given argument is not a bool.
"""
if use_color != self._use_color:
self._use_color = self.sanitize_bool(use_color, "use_color")
self.video_params_changed = True
def set_use_square_pixels(self, use_square_pixels):
"""Request using/not using square pixels.
:param use_square_pixels: If True, the resolution will be stretched to match 1:1 pixels.
By default, the pixels have a ratio of 11:12.
:type use_square_pixels: bool
:raises: :py:exc:`ValueError` if the given argument is not a bool.
"""
if use_square_pixels != self._use_square_pixels:
self._use_square_pixels = self.sanitize_bool(use_square_pixels, "use_square_pixels")
self.video_params_changed = True
@staticmethod
def sanitize_bool(value, field_name):
"""Convert the given value to a bool.
        :param value: Either True, False, "1", "0", 1 or 0.
:type value: :py:class:`basestring` | :py:class:`bool` | :py:class:`int`
:param field_name: Name of the field this value belongs to (just for debug messages).
:type field_name: basestring
:return: The bool value of the given value.
:rtype: :py:class:`bool`
:raises: :py:exc:`ValueError` if the given value is not supported in this conversion.
"""
if value not in (True, False, "1", "0", 1, 0):
raise ValueError("%s is not a valid value for %s." % (str(value), field_name))
# bool("0") returns True because it is a nonempty string
if value == "0":
return False
return bool(value)
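    # Quick illustration of the conversion above (sample values, not exhaustive):
    #
    #   Axis.sanitize_bool("1", "use_color")    -> True
    #   Axis.sanitize_bool("0", "use_color")    -> False (handled explicitly; bool("0") would be True)
    #   Axis.sanitize_bool("yes", "use_color")  -> raises ValueError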
def _camera_diagnostic_callback(self, diag_message):
assert isinstance(diag_message, DiagnosticStatusWrapper)
diag_message.summary(DiagnosticStatusWrapper.OK, "Video parameters")
diag_message.add("FPS", self._fps)
diag_message.add("Resolution", self._resolution)
diag_message.add("Compression", self._compression)
diag_message.add("Color image", self._use_color)
diag_message.add("Square pixels used", self._use_square_pixels)
class VideoResolution(object):
"""A class representing a video resolution."""
def __init__(self, width, height):
"""Create a representation of the resolution.
:param width: Width of the resolution in pixels.
:type width: int
:param height: Height of the resolution in pixels.
:type height: int
"""
super(VideoResolution, self).__init__()
self.width = int(width)
self.height = int(height)
self.square_pixel_conversion_ratio_width = 12.0 / 11.0
self.square_pixel_conversion_ratio_height = 1
def __str__(self):
return "%dx%d" % (self.width, self.height)
def __repr__(self):
return "VideoResolution(width=%r,height=%r)" % (self.width, self.height)
def __eq__(self, other):
# compare by attribute values
return self.__dict__ == other.__dict__
def __ne__(self, other):
# reuse the former __eq__ definition
return not self == other
def get_resolution(self, use_square_pixels=False):
"""Get the image dimensions corresponding to this resolution.
        :param use_square_pixels: Whether to stretch the resulting resolution to square pixels.
:type use_square_pixels: bool
:return: A tuple (width, height)
:rtype: tuple
"""
width = self.width
height = self.height
if use_square_pixels:
width = int(math.ceil(self.square_pixel_conversion_ratio_width * self.width))
height = int(math.ceil(self.square_pixel_conversion_ratio_height * self.height))
return width, height
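    # Worked example of the 12/11 width stretch above (numbers only illustrative):
    # for a 704x576 stream, use_square_pixels=True yields
    #   width  = ceil(704 * 12.0 / 11.0) = 768
    #   height = ceil(576 * 1)           = 576
    # i.e. the usual 768x576 square-pixel equivalent of PAL 704x576.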
def get_vapix_representation(self):
return "%dx%d" % (self.width, self.height)
@staticmethod
def parse_from_vapix_param_value(value, api):
assert isinstance(value, basestring)
assert isinstance(api, VAPIX)
numeric_regexp = re.compile(r"(\d+)x(\d+)")
match = numeric_regexp.match(value)
if match is not None:
return VideoResolution(int(match.group(1)), int(match.group(2)))
else: # resolution given by CIF name
name = value
width, height = api.resolve_video_resolution_name(name)
return CIFVideoResolution(name, width, height)
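    # Rough sketch of what the parser above accepts (sample values are assumptions):
    #
    #   VideoResolution.parse_from_vapix_param_value("704x576", api)
    #       -> VideoResolution(width=704, height=576)
    #   VideoResolution.parse_from_vapix_param_value("CIF", api)
    #       -> CIFVideoResolution("CIF", w, h), with w, h looked up via
    #          api.resolve_video_resolution_name("CIF")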
class CIFVideoResolution(VideoResolution):
"""A class representing a CIF standard resolution."""
def __init__(self, name, width, height):
"""Create a representation of a CIF resolution.
:param name: CIF standard name of the resolution.
:type name: basestring
:param width: Width of the resolution in pixels.
:type width: int
:param height: Height of the resolution in pixels.
:type height: int
"""
super(CIFVideoResolution, self).__init__(width, height)
self.name = name
def __str__(self):
return "%s (%dx%d)" % (self.name, self.width, self.height)
def __repr__(self):
return "CIFVideoResolution(name=%r,width=%r,height=%r)" % (self.name, self.width, self.height)
def main():
"""Start the ROS driver and ROS node."""
rospy.init_node("axis_driver")
arg_defaults = {
'hostname': '192.168.0.90', # default IP address
'username': None, # default login name
'password': None,
'width': 704,
'height': 576,
'frame_id': 'axis_camera',
'camera_info_url': '',
'use_encrypted_password': False,
'camera_id': 1,
'auto_wakeup_camera': True,
'compression': 0,
'fps': 24,
'use_color': True,
'use_square_pixels': False,
}
args = read_args_with_defaults(arg_defaults)
axis = Axis(**args)
rate = rospy.Rate(1)
while not rospy.is_shutdown():
axis.diagnostic_updater.update()
try:
rate.sleep()
except rospy.ROSTimeMovedBackwardsException:
rospy.logwarn("Detected jump back in time.")
class PausableDiagnosedPublisher(DiagnosedPublisher):
def __init__(self, axis, pub, diag, freq, stamp):
DiagnosedPublisher.__init__(self, pub, diag, freq, stamp)
self._axis = axis
def run(self, stat):
if self._axis._streaming_thread is None or self._axis._streaming_thread.is_paused():
stat.summary(DiagnosticStatusWrapper.OK, "Video not subscribed")
else:
stat = DiagnosedPublisher.run(self, stat)
return stat
def read_args_with_defaults(arg_defaults):
"""Look up parameters starting in the driver's private parameter space, but also searching outer namespaces.
Defining them in a higher namespace allows the axis_ptz.py script to share parameters with the driver."""
args = {}
for name, val in arg_defaults.iteritems():
full_name = rospy.search_param(name)
if full_name is None:
args[name] = val
else:
args[name] = rospy.get_param(full_name, val)
# resolve frame_id with tf_prefix (unless already absolute)
if args['frame_id'][0] != '/': # not absolute?
tf_prefix = rospy.search_param('tf_prefix')
prefix_val = ''
if tf_prefix is not None: # prefix defined?
prefix_val = rospy.get_param(tf_prefix)
if prefix_val[0] != '/': # prefix not absolute?
prefix_val = '/' + prefix_val
args['frame_id'] = prefix_val + '/' + args['frame_id']
return args
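# Example of the frame_id resolution above (parameter values are assumed): with
# frame_id:='axis_camera' and tf_prefix:='robot1' on the parameter server, the
# driver ends up publishing with frame_id '/robot1/axis_camera'; an already
# absolute frame_id such as '/base/axis_camera' is left untouched.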
if __name__ == "__main__":
main()
|
set_resolution
|
Request a new resolution for the video stream.
        :param resolution_value: A string of the form `width`x`height`, or a :py:class:`VideoResolution` object.
:type resolution_value: basestring|VideoResolution
:raises: :py:exc:`ValueError` if the resolution is unknown/unsupported.
|
#!/usr/bin/env python
"""
Axis camera video driver. Inspired by:
https://code.ros.org/svn/wg-ros-pkg/branches/trunk_cturtle/sandbox/axis_camera/axis.py
Communication with the camera is done using the Axis VAPIX API described at
http://www.axis.com/global/en/support/developer-support/vapix
.. note::
This is a major rewrite of the former ros-drivers/axis_camera node, so it contains a (deprecated) backwards
compatibility layer for the previous (non-released) API.
"""
import math
import re
import rospy
from sensor_msgs.msg import CompressedImage, CameraInfo
import camera_info_manager
import dynamic_reconfigure.server
from diagnostic_updater import Updater, DiagnosedPublisher, TimeStampStatusParam, FrequencyStatusParam, \
FunctionDiagnosticTask, DiagnosticStatusWrapper
from axis_camera.cfg import VideoStreamConfig
from axis_camera.srv import TakeSnapshot, TakeSnapshotResponse
from axis_camera.vapix import VAPIX
from axis_camera.video_streaming import ImageStreamingThread
from axis_camera.dynamic_reconfigure_tools import change_enum_items
# BACKWARDS COMPATIBILITY LAYER
StreamThread = ImageStreamingThread # deprecated
class Axis(rospy.SubscribeListener):
"""The ROS-VAPIX interface for video streaming."""
def __init__(self, hostname, username, password, width, height, frame_id, camera_info_url, use_encrypted_password,
camera_id=1, auto_wakeup_camera=True, compression=0, fps=24, use_color=True,
use_square_pixels=False):
"""Create the ROS-VAPIX interface.
:param hostname: Hostname of the camera (without http://, can be an IP address).
:type hostname: basestring
:param username: If login is needed, provide a username here.
:type username: :py:obj:`basestring` | None
:param password: If login is needed, provide a password here.
:type password: :py:obj:`basestring` | None
:param width: Width of the requested video stream in pixels (can be changed later). Must be one of the supported
resolutions. If `None`, the resolution will be chosen by height only. If also `height` is `None`,
then the default camera resolution will be used.
:type width: int|None
:param height: Height of the requested video stream in pixels (can be changed later). Must be one of the
supported resolutions. If `None`, the resolution will be chosen by width only. If also `width` is
`None`, then the default camera resolution will be used.
:type height: int|None
:param frame_id: The ROS TF frame assigned to the camera.
:type frame_id: basestring
        :param camera_info_url: The URL pointing to the camera calibration, if available.
:type camera_info_url: basestring
:param use_encrypted_password: Whether to use Plain HTTP Auth (False) or Digest HTTP Auth (True).
:type use_encrypted_password: bool
:param camera_id: ID (number) of the camera. Can be 1 to 4.
:type camera_id: int
        :param auto_wakeup_camera: If True, the driver tries to wake up the camera after the first unsuccessful network command.
:type auto_wakeup_camera: bool
:param compression: Compression of the image (0 - no compression, 100 - max compression).
:type compression: int
:param fps: The desired frames per second.
:type fps: int
:param use_color: If True, send a color stream, otherwise send only grayscale image.
:type use_color: bool
:param use_square_pixels: If True, the resolution will be stretched to match 1:1 pixels.
By default, the pixels have a ratio of 11:12.
:type use_square_pixels: bool
        :raises: :py:exc:`ValueError` if the requested resolution (either the `resolution`, or `width`+`height`)
is not supported.
"""
# True every time the video parameters have changed and the URL has to be altered (set from other threads).
self.video_params_changed = False
self.__initializing = True
self._hostname = hostname
self._camera_id = camera_id
self.diagnostic_updater = Updater()
self.diagnostic_updater.setHardwareID(hostname)
self._api = None
# autodetect the VAPIX API and connect to it; try it forever
while self._api is None and not rospy.is_shutdown():
try:
self._api = VAPIX.get_api_for_camera(hostname, username, password, camera_id, use_encrypted_password)
except (IOError, ValueError):
rospy.loginfo("Retrying connection to VAPIX on host %s, camera %d in 2 seconds." %
(hostname, camera_id))
rospy.sleep(2)
if rospy.is_shutdown():
return
self._allowed_resolutions = self._get_allowed_resolutions()
rospy.loginfo("The following resolutions are available for camera %d:\n%s" %
(camera_id, "\n".join([str(res) for res in self._allowed_resolutions])))
rospy.set_param("~allowed_resolutions", [res.get_vapix_representation() for res in self._allowed_resolutions])
# Sometimes the camera falls into power saving mode and stops streaming.
# This setting allows the script to try to wake up the camera.
self._auto_wakeup_camera = auto_wakeup_camera
# dynamic-reconfigurable properties - definitions
self._width = None # deprecated
self._height = None # deprecated
self._resolution = None
self._compression = None
self._fps = None
self._use_color = None
self._use_square_pixels = None
# treat empty strings as None in width and height params
width = width if width != "" else None
height = height if height != "" else None
# dynamic-reconfigurable properties - defaults
if width is None and height is None:
# TODO change to perform default resolution detection from VAPIX
self.set_resolution(self._allowed_resolutions[0])
else:
resolution = self.find_resolution_by_size(width, height)
self.set_resolution(resolution.get_vapix_representation())
self.set_compression(compression)
self.set_fps(fps)
self.set_use_color(use_color)
self.set_use_square_pixels(use_square_pixels)
# only advertise the supported resolutions on dynamic reconfigure
change_enum_items(
VideoStreamConfig,
"resolution",
[{
'name': res.name if isinstance(res, CIFVideoResolution) else str(res),
'value': res.get_vapix_representation(),
'description': str(res)
} for res in self._allowed_resolutions],
self._resolution.get_vapix_representation()
)
# dynamic reconfigure server
self._video_stream_param_change_server = dynamic_reconfigure.server.Server(VideoStreamConfig,
self.reconfigure_video)
# camera info setup
self._frame_id = frame_id
self._camera_info_url = camera_info_url
# generate a valid camera name based on the hostname
self._camera_name = camera_info_manager.genCameraName(self._hostname)
self._camera_info = camera_info_manager.CameraInfoManager(cname=self._camera_name, url=self._camera_info_url)
self._camera_info.loadCameraInfo() # required before getCameraInfo()
# the thread used for streaming images (is instantiated when the first image subscriber subscribes)
self._streaming_thread = None
# the publishers are started/stopped lazily in peer_subscribe/peer_unsubscribe
self._video_publisher_frequency_diagnostic = FrequencyStatusParam({'min': self._fps, 'max': self._fps})
self._video_publisher = PausableDiagnosedPublisher(
self,
rospy.Publisher("image_raw/compressed", CompressedImage, self, queue_size=100),
self.diagnostic_updater, self._video_publisher_frequency_diagnostic, TimeStampStatusParam()
)
self._camera_info_publisher = PausableDiagnosedPublisher(
self,
rospy.Publisher("camera_info", CameraInfo, self, queue_size=100),
self.diagnostic_updater, self._video_publisher_frequency_diagnostic, TimeStampStatusParam()
)
self._snapshot_server = rospy.Service("take_snapshot", TakeSnapshot, self.take_snapshot)
self.diagnostic_updater.add(FunctionDiagnosticTask("Camera parameters", self._camera_diagnostic_callback))
# BACKWARDS COMPATIBILITY LAYER
self.username = username # deprecated
self.password = password # deprecated
self.use_encrypted_password = use_encrypted_password # deprecated
self.st = None # deprecated
self.pub = self._video_publisher # deprecated
self.caminfo_pub = self._camera_info_publisher # deprecated
self.__initializing = False
def __str__(self):
(width, height) = self._resolution.get_resolution(self._use_square_pixels)
return 'Axis driver on host %s, camera %d (%dx%d px @ %d FPS)' % \
(self._hostname, self._api.camera_id, width, height, self._fps)
def peer_subscribe(self, topic_name, topic_publish, peer_publish):
"""Lazy-start the image-publisher."""
if self._streaming_thread is None:
self._streaming_thread = ImageStreamingThread(self)
self._streaming_thread.start()
else:
self._streaming_thread.resume()
def peer_unsubscribe(self, topic_name, num_peers):
"""Lazy-stop the image-publisher when nobody is interested"""
if num_peers == 0:
self._streaming_thread.pause()
def take_snapshot(self, request):
"""Retrieve a snapshot from the camera.
:param request: The service request.
:type request: :py:class:`axis_camera.srv.TakeSnapshotRequest`
:return: The response containing the image.
:rtype: :py:class:`axis_camera.srv.TakeSnapshotResponse`
:raises: :py:exc:`IOError`, :py:exc:`urllib2.URLError`
"""
image_data = self._api.take_snapshot()
image = CompressedImage()
image.header.stamp = rospy.Time.now()
image.header.frame_id = self._frame_id
image.format = "jpeg"
image.data = image_data
response = TakeSnapshotResponse()
response.image = image
return response
def reconfigure_video(self, config, level):
"""Dynamic reconfigure callback for video parameters.
:param config: The requested configuration.
:type config: dict
:param level: Unused here.
:type level: int
:return: The config corresponding to what was really achieved.
:rtype: dict
"""
if self.__initializing:
# in the initialization phase, we want to give precedence to the values given to the constructor
config.compression = self._compression
config.fps = self._fps
config.use_color = self._use_color
config.use_square_pixels = self._use_square_pixels
config.resolution = self._resolution.get_vapix_representation()
else:
self.__try_set_value_from_config(config, 'compression', self.set_compression)
self.__try_set_value_from_config(config, 'fps', self.set_fps)
self.__try_set_value_from_config(config, 'use_color', self.set_use_color)
self.__try_set_value_from_config(config, 'use_square_pixels', self.set_use_square_pixels)
try:
self.set_resolution(config.resolution)
except ValueError:
config.resolution = self._resolution.get_vapix_representation()
return config
def __try_set_value_from_config(self, config, field, setter):
"""First, try to call `setter(config[field])`, and if this call doesn't succeed. set the field in config to
its value stored in this class.
:param config: The dynamic reconfigure config dictionary.
:type config: dict
:param field: The field name (both in :py:obj:`config` and in :py:obj:`self`).
:type field: basestring
:param setter: The setter to use to set the value.
:type setter: lambda function
"""
try:
setter(config[field])
except ValueError:
            config[field] = getattr(self, '_' + field)  # the stored values use a leading-underscore attribute name
#################################
# DYNAMIC RECONFIGURE CALLBACKS #
#################################
# MASKED: set_resolution function (lines 288-309)
def _get_resolution_from_param_value(self, value):
"""Return a :py:class:`VideoResolution` object corresponding to the given video resolution param string.
:param value: Value of the resolution parameter to parse (of form `width`x`height`).
:type value: basestring
:return: The :py:class:`VideoResolution` corresponding to the given resolution param string.
:rtype: :py:class:`VideoResolution`
:raises: :py:exc:`ValueError` if the resolution is unknown/unsupported.
"""
for resolution in self._allowed_resolutions:
if resolution.get_vapix_representation() == value:
return resolution
raise ValueError("%s is not a valid valid resolution." % value)
def find_resolution_by_size(self, width, height):
"""Return a :py:class:`VideoResolution` object with the given dimensions.
        If there are multiple resolutions with the same size, any of them may be returned.
:param width: Image width in pixels. If `None`, resolutions will be matched only by height.
:type width: int|None
:param height: Image height in pixels. If `None`, resolutions will be matched only by width.
:type height: int|None
:return: The corresponding resolution object.
:rtype: :py:class:`VideoResolution`
:raises: :py:exc:`ValueError` if no resolution with the given dimensions can be found.
:raises: :py:exc:`ValueError` if both `width` and `height` are None.
"""
if width is None and height is None:
raise ValueError("Either width or height of the desired resolution must be specified.")
for resolution in self._allowed_resolutions:
size = resolution.get_resolution(use_square_pixels=False)
if (width is None or width == size[0]) and (height is None or height == size[1]):
return resolution
size = resolution.get_resolution(use_square_pixels=True)
if (width is None or width == size[0]) and (height is None or height == size[1]):
return resolution
raise ValueError("Cannot find a supported resolution with dimensions %sx%s" % (width, height))
def _get_allowed_resolutions(self):
"""Return a list of resolutions supported both by the camera.
:return: The supported resolutions list.
:rtype: list of :py:class:`VideoResolution`
"""
camera_resolutions = self._get_resolutions_supported_by_camera()
return camera_resolutions
def _get_resolutions_supported_by_camera(self):
"""Return a list of resolutions supported the camera.
:return: The supported resolutions list.
:rtype: list of :py:class:`VideoResolution`
"""
try:
names = self._api.parse_list_parameter_value(self._api.get_parameter("Properties.Image.Resolution"))
return [VideoResolution.parse_from_vapix_param_value(name, self._api) for name in names]
except (IOError, ValueError):
rospy.logwarn("Could not determine resolutions supported by the camera. Asssuming only CIF.")
return [CIFVideoResolution("CIF", 384, 288)]
def set_compression(self, compression):
"""Request the given compression level for the video stream.
:param compression: Compression of the image (0 - no compression, 100 - max compression).
:type compression: int
:raises: :py:exc:`ValueError` if the given compression level is outside the allowed range.
"""
if compression != self._compression:
self._compression = self.sanitize_compression(compression)
self.video_params_changed = True
@staticmethod
def sanitize_compression(compression):
"""Make sure the given value can be used as a compression level of the video stream.
:param compression: Compression of the image (0 - no compression, 100 - max compression).
:type compression: int
:return: The given compression converted to an int.
:rtype: int
:raises: :py:exc:`ValueError` if the given compression level is outside the allowed range.
"""
compression = int(compression)
if not (0 <= compression <= 100):
raise ValueError("%s is not a valid value for compression." % str(compression))
return compression
def set_fps(self, fps):
"""Request the given compression level for the video stream.
:param fps: The desired frames per second.
:type fps: int
:raises: :py:exc:`ValueError` if the given FPS is outside the allowed range.
"""
if fps != self._fps:
self._fps = self.sanitize_fps(fps)
self.video_params_changed = True
if hasattr(self, "_video_publisher_frequency_diagnostic"):
self._video_publisher_frequency_diagnostic.freq_bound['min'] = self._fps
self._video_publisher_frequency_diagnostic.freq_bound['max'] = self._fps
@staticmethod
def sanitize_fps(fps):
"""Make sure the given value can be used as FPS of the video stream.
:param fps: The desired frames per second.
:type fps: int
:return: The given FPS converted to an int.
:rtype: int
:raises: :py:exc:`ValueError` if the given FPS is outside the allowed range.
"""
fps = int(fps)
if not (1 <= fps <= 30):
raise ValueError("%s is not a valid value for FPS." % str(fps))
return fps
def set_use_color(self, use_color):
"""Request using/not using color in the video stream.
:param use_color: If True, send a color stream, otherwise send only grayscale image.
:type use_color: bool
:raises: :py:exc:`ValueError` if the given argument is not a bool.
"""
if use_color != self._use_color:
self._use_color = self.sanitize_bool(use_color, "use_color")
self.video_params_changed = True
def set_use_square_pixels(self, use_square_pixels):
"""Request using/not using square pixels.
:param use_square_pixels: If True, the resolution will be stretched to match 1:1 pixels.
By default, the pixels have a ratio of 11:12.
:type use_square_pixels: bool
:raises: :py:exc:`ValueError` if the given argument is not a bool.
"""
if use_square_pixels != self._use_square_pixels:
self._use_square_pixels = self.sanitize_bool(use_square_pixels, "use_square_pixels")
self.video_params_changed = True
@staticmethod
def sanitize_bool(value, field_name):
"""Convert the given value to a bool.
        :param value: Either True, False, "1", "0", 1 or 0.
:type value: :py:class:`basestring` | :py:class:`bool` | :py:class:`int`
:param field_name: Name of the field this value belongs to (just for debug messages).
:type field_name: basestring
:return: The bool value of the given value.
:rtype: :py:class:`bool`
:raises: :py:exc:`ValueError` if the given value is not supported in this conversion.
"""
if value not in (True, False, "1", "0", 1, 0):
raise ValueError("%s is not a valid value for %s." % (str(value), field_name))
# bool("0") returns True because it is a nonempty string
if value == "0":
return False
return bool(value)
def _camera_diagnostic_callback(self, diag_message):
assert isinstance(diag_message, DiagnosticStatusWrapper)
diag_message.summary(DiagnosticStatusWrapper.OK, "Video parameters")
diag_message.add("FPS", self._fps)
diag_message.add("Resolution", self._resolution)
diag_message.add("Compression", self._compression)
diag_message.add("Color image", self._use_color)
diag_message.add("Square pixels used", self._use_square_pixels)
class VideoResolution(object):
"""A class representing a video resolution."""
def __init__(self, width, height):
"""Create a representation of the resolution.
:param width: Width of the resolution in pixels.
:type width: int
:param height: Height of the resolution in pixels.
:type height: int
"""
super(VideoResolution, self).__init__()
self.width = int(width)
self.height = int(height)
self.square_pixel_conversion_ratio_width = 12.0 / 11.0
self.square_pixel_conversion_ratio_height = 1
def __str__(self):
return "%dx%d" % (self.width, self.height)
def __repr__(self):
return "VideoResolution(width=%r,height=%r)" % (self.width, self.height)
def __eq__(self, other):
# compare by attribute values
return self.__dict__ == other.__dict__
def __ne__(self, other):
# reuse the former __eq__ definition
return not self == other
def get_resolution(self, use_square_pixels=False):
"""Get the image dimensions corresponding to this resolution.
        :param use_square_pixels: Whether to stretch the resulting resolution to square pixels.
:type use_square_pixels: bool
:return: A tuple (width, height)
:rtype: tuple
"""
width = self.width
height = self.height
if use_square_pixels:
width = int(math.ceil(self.square_pixel_conversion_ratio_width * self.width))
height = int(math.ceil(self.square_pixel_conversion_ratio_height * self.height))
return width, height
def get_vapix_representation(self):
return "%dx%d" % (self.width, self.height)
@staticmethod
def parse_from_vapix_param_value(value, api):
assert isinstance(value, basestring)
assert isinstance(api, VAPIX)
numeric_regexp = re.compile(r"(\d+)x(\d+)")
match = numeric_regexp.match(value)
if match is not None:
return VideoResolution(int(match.group(1)), int(match.group(2)))
else: # resolution given by CIF name
name = value
width, height = api.resolve_video_resolution_name(name)
return CIFVideoResolution(name, width, height)
class CIFVideoResolution(VideoResolution):
"""A class representing a CIF standard resolution."""
def __init__(self, name, width, height):
"""Create a representation of a CIF resolution.
:param name: CIF standard name of the resolution.
:type name: basestring
:param width: Width of the resolution in pixels.
:type width: int
:param height: Height of the resolution in pixels.
:type height: int
"""
super(CIFVideoResolution, self).__init__(width, height)
self.name = name
def __str__(self):
return "%s (%dx%d)" % (self.name, self.width, self.height)
def __repr__(self):
return "CIFVideoResolution(name=%r,width=%r,height=%r)" % (self.name, self.width, self.height)
def main():
"""Start the ROS driver and ROS node."""
rospy.init_node("axis_driver")
arg_defaults = {
'hostname': '192.168.0.90', # default IP address
'username': None, # default login name
'password': None,
'width': 704,
'height': 576,
'frame_id': 'axis_camera',
'camera_info_url': '',
'use_encrypted_password': False,
'camera_id': 1,
'auto_wakeup_camera': True,
'compression': 0,
'fps': 24,
'use_color': True,
'use_square_pixels': False,
}
args = read_args_with_defaults(arg_defaults)
axis = Axis(**args)
rate = rospy.Rate(1)
while not rospy.is_shutdown():
axis.diagnostic_updater.update()
try:
rate.sleep()
except rospy.ROSTimeMovedBackwardsException:
rospy.logwarn("Detected jump back in time.")
class PausableDiagnosedPublisher(DiagnosedPublisher):
def __init__(self, axis, pub, diag, freq, stamp):
DiagnosedPublisher.__init__(self, pub, diag, freq, stamp)
self._axis = axis
def run(self, stat):
if self._axis._streaming_thread is None or self._axis._streaming_thread.is_paused():
stat.summary(DiagnosticStatusWrapper.OK, "Video not subscribed")
else:
stat = DiagnosedPublisher.run(self, stat)
return stat
def read_args_with_defaults(arg_defaults):
"""Look up parameters starting in the driver's private parameter space, but also searching outer namespaces.
Defining them in a higher namespace allows the axis_ptz.py script to share parameters with the driver."""
args = {}
for name, val in arg_defaults.iteritems():
full_name = rospy.search_param(name)
if full_name is None:
args[name] = val
else:
args[name] = rospy.get_param(full_name, val)
# resolve frame_id with tf_prefix (unless already absolute)
if args['frame_id'][0] != '/': # not absolute?
tf_prefix = rospy.search_param('tf_prefix')
prefix_val = ''
if tf_prefix is not None: # prefix defined?
prefix_val = rospy.get_param(tf_prefix)
if prefix_val[0] != '/': # prefix not absolute?
prefix_val = '/' + prefix_val
args['frame_id'] = prefix_val + '/' + args['frame_id']
return args
if __name__ == "__main__":
main()
|
def set_resolution(self, resolution_value):
"""Request a new resolution for the video stream.
        :param resolution_value: A string of the form `width`x`height`, or a :py:class:`VideoResolution` object.
:type resolution_value: basestring|VideoResolution
:raises: :py:exc:`ValueError` if the resolution is unknown/unsupported.
"""
resolution = None
if isinstance(resolution_value, VideoResolution):
resolution = resolution_value
elif isinstance(resolution_value, basestring):
resolution = self._get_resolution_from_param_value(resolution_value)
if resolution is None:
raise ValueError("Unsupported resolution type specified: %r" % resolution_value)
if self._resolution is None or resolution != self._resolution:
self._resolution = resolution
self.video_params_changed = True
# deprecated values
self._width = resolution.get_resolution(self._use_square_pixels)[0]
self._height = resolution.get_resolution(self._use_square_pixels)[1]
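    # Illustrative call sketch (not part of the original source; the resolution
    # strings are assumptions based on the VAPIX "widthxheight" form used above):
    #
    #   axis.set_resolution("704x576")                  # known string -> params marked as changed
    #   axis.set_resolution(VideoResolution(704, 576))  # object form is accepted directly
    #   axis.set_resolution("123x45")                   # unknown string -> raises ValueError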
| 288 | 309 |
#!/usr/bin/env python
"""
Axis camera video driver. Inspired by:
https://code.ros.org/svn/wg-ros-pkg/branches/trunk_cturtle/sandbox/axis_camera/axis.py
Communication with the camera is done using the Axis VAPIX API described at
http://www.axis.com/global/en/support/developer-support/vapix
.. note::
This is a major rewrite of the former ros-drivers/axis_camera node, so it contains a (deprecated) backwards
compatibility layer for the previous (non-released) API.
"""
import math
import re
import rospy
from sensor_msgs.msg import CompressedImage, CameraInfo
import camera_info_manager
import dynamic_reconfigure.server
from diagnostic_updater import Updater, DiagnosedPublisher, TimeStampStatusParam, FrequencyStatusParam, \
FunctionDiagnosticTask, DiagnosticStatusWrapper
from axis_camera.cfg import VideoStreamConfig
from axis_camera.srv import TakeSnapshot, TakeSnapshotResponse
from axis_camera.vapix import VAPIX
from axis_camera.video_streaming import ImageStreamingThread
from axis_camera.dynamic_reconfigure_tools import change_enum_items
# BACKWARDS COMPATIBILITY LAYER
StreamThread = ImageStreamingThread # deprecated
class Axis(rospy.SubscribeListener):
"""The ROS-VAPIX interface for video streaming."""
def __init__(self, hostname, username, password, width, height, frame_id, camera_info_url, use_encrypted_password,
camera_id=1, auto_wakeup_camera=True, compression=0, fps=24, use_color=True,
use_square_pixels=False):
"""Create the ROS-VAPIX interface.
:param hostname: Hostname of the camera (without http://, can be an IP address).
:type hostname: basestring
:param username: If login is needed, provide a username here.
:type username: :py:obj:`basestring` | None
:param password: If login is needed, provide a password here.
:type password: :py:obj:`basestring` | None
:param width: Width of the requested video stream in pixels (can be changed later). Must be one of the supported
resolutions. If `None`, the resolution will be chosen by height only. If also `height` is `None`,
then the default camera resolution will be used.
:type width: int|None
:param height: Height of the requested video stream in pixels (can be changed later). Must be one of the
supported resolutions. If `None`, the resolution will be chosen by width only. If also `width` is
`None`, then the default camera resolution will be used.
:type height: int|None
:param frame_id: The ROS TF frame assigned to the camera.
:type frame_id: basestring
        :param camera_info_url: The URL pointing to the camera calibration, if available.
:type camera_info_url: basestring
:param use_encrypted_password: Whether to use Plain HTTP Auth (False) or Digest HTTP Auth (True).
:type use_encrypted_password: bool
:param camera_id: ID (number) of the camera. Can be 1 to 4.
:type camera_id: int
        :param auto_wakeup_camera: If True, the driver tries to wake up the camera after the first unsuccessful network command.
:type auto_wakeup_camera: bool
:param compression: Compression of the image (0 - no compression, 100 - max compression).
:type compression: int
:param fps: The desired frames per second.
:type fps: int
:param use_color: If True, send a color stream, otherwise send only grayscale image.
:type use_color: bool
:param use_square_pixels: If True, the resolution will be stretched to match 1:1 pixels.
By default, the pixels have a ratio of 11:12.
:type use_square_pixels: bool
        :raises: :py:exc:`ValueError` if the requested resolution (either the `resolution`, or `width`+`height`)
is not supported.
"""
# True every time the video parameters have changed and the URL has to be altered (set from other threads).
self.video_params_changed = False
self.__initializing = True
self._hostname = hostname
self._camera_id = camera_id
self.diagnostic_updater = Updater()
self.diagnostic_updater.setHardwareID(hostname)
self._api = None
# autodetect the VAPIX API and connect to it; try it forever
while self._api is None and not rospy.is_shutdown():
try:
self._api = VAPIX.get_api_for_camera(hostname, username, password, camera_id, use_encrypted_password)
except (IOError, ValueError):
rospy.loginfo("Retrying connection to VAPIX on host %s, camera %d in 2 seconds." %
(hostname, camera_id))
rospy.sleep(2)
if rospy.is_shutdown():
return
self._allowed_resolutions = self._get_allowed_resolutions()
rospy.loginfo("The following resolutions are available for camera %d:\n%s" %
(camera_id, "\n".join([str(res) for res in self._allowed_resolutions])))
rospy.set_param("~allowed_resolutions", [res.get_vapix_representation() for res in self._allowed_resolutions])
# Sometimes the camera falls into power saving mode and stops streaming.
# This setting allows the script to try to wake up the camera.
self._auto_wakeup_camera = auto_wakeup_camera
# dynamic-reconfigurable properties - definitions
self._width = None # deprecated
self._height = None # deprecated
self._resolution = None
self._compression = None
self._fps = None
self._use_color = None
self._use_square_pixels = None
# treat empty strings as None in width and height params
width = width if width != "" else None
height = height if height != "" else None
# dynamic-reconfigurable properties - defaults
if width is None and height is None:
# TODO change to perform default resolution detection from VAPIX
self.set_resolution(self._allowed_resolutions[0])
else:
resolution = self.find_resolution_by_size(width, height)
self.set_resolution(resolution.get_vapix_representation())
self.set_compression(compression)
self.set_fps(fps)
self.set_use_color(use_color)
self.set_use_square_pixels(use_square_pixels)
# only advertise the supported resolutions on dynamic reconfigure
change_enum_items(
VideoStreamConfig,
"resolution",
[{
'name': res.name if isinstance(res, CIFVideoResolution) else str(res),
'value': res.get_vapix_representation(),
'description': str(res)
} for res in self._allowed_resolutions],
self._resolution.get_vapix_representation()
)
# dynamic reconfigure server
self._video_stream_param_change_server = dynamic_reconfigure.server.Server(VideoStreamConfig,
self.reconfigure_video)
# camera info setup
self._frame_id = frame_id
self._camera_info_url = camera_info_url
# generate a valid camera name based on the hostname
self._camera_name = camera_info_manager.genCameraName(self._hostname)
self._camera_info = camera_info_manager.CameraInfoManager(cname=self._camera_name, url=self._camera_info_url)
self._camera_info.loadCameraInfo() # required before getCameraInfo()
# the thread used for streaming images (is instantiated when the first image subscriber subscribes)
self._streaming_thread = None
# the publishers are started/stopped lazily in peer_subscribe/peer_unsubscribe
self._video_publisher_frequency_diagnostic = FrequencyStatusParam({'min': self._fps, 'max': self._fps})
self._video_publisher = PausableDiagnosedPublisher(
self,
rospy.Publisher("image_raw/compressed", CompressedImage, self, queue_size=100),
self.diagnostic_updater, self._video_publisher_frequency_diagnostic, TimeStampStatusParam()
)
self._camera_info_publisher = PausableDiagnosedPublisher(
self,
rospy.Publisher("camera_info", CameraInfo, self, queue_size=100),
self.diagnostic_updater, self._video_publisher_frequency_diagnostic, TimeStampStatusParam()
)
self._snapshot_server = rospy.Service("take_snapshot", TakeSnapshot, self.take_snapshot)
self.diagnostic_updater.add(FunctionDiagnosticTask("Camera parameters", self._camera_diagnostic_callback))
# BACKWARDS COMPATIBILITY LAYER
self.username = username # deprecated
self.password = password # deprecated
self.use_encrypted_password = use_encrypted_password # deprecated
self.st = None # deprecated
self.pub = self._video_publisher # deprecated
self.caminfo_pub = self._camera_info_publisher # deprecated
self.__initializing = False
def __str__(self):
(width, height) = self._resolution.get_resolution(self._use_square_pixels)
return 'Axis driver on host %s, camera %d (%dx%d px @ %d FPS)' % \
(self._hostname, self._api.camera_id, width, height, self._fps)
def peer_subscribe(self, topic_name, topic_publish, peer_publish):
"""Lazy-start the image-publisher."""
if self._streaming_thread is None:
self._streaming_thread = ImageStreamingThread(self)
self._streaming_thread.start()
else:
self._streaming_thread.resume()
def peer_unsubscribe(self, topic_name, num_peers):
"""Lazy-stop the image-publisher when nobody is interested"""
if num_peers == 0:
self._streaming_thread.pause()
def take_snapshot(self, request):
"""Retrieve a snapshot from the camera.
:param request: The service request.
:type request: :py:class:`axis_camera.srv.TakeSnapshotRequest`
:return: The response containing the image.
:rtype: :py:class:`axis_camera.srv.TakeSnapshotResponse`
:raises: :py:exc:`IOError`, :py:exc:`urllib2.URLError`
"""
image_data = self._api.take_snapshot()
image = CompressedImage()
image.header.stamp = rospy.Time.now()
image.header.frame_id = self._frame_id
image.format = "jpeg"
image.data = image_data
response = TakeSnapshotResponse()
response.image = image
return response
def reconfigure_video(self, config, level):
"""Dynamic reconfigure callback for video parameters.
:param config: The requested configuration.
:type config: dict
:param level: Unused here.
:type level: int
:return: The config corresponding to what was really achieved.
:rtype: dict
"""
if self.__initializing:
# in the initialization phase, we want to give precedence to the values given to the constructor
config.compression = self._compression
config.fps = self._fps
config.use_color = self._use_color
config.use_square_pixels = self._use_square_pixels
config.resolution = self._resolution.get_vapix_representation()
else:
self.__try_set_value_from_config(config, 'compression', self.set_compression)
self.__try_set_value_from_config(config, 'fps', self.set_fps)
self.__try_set_value_from_config(config, 'use_color', self.set_use_color)
self.__try_set_value_from_config(config, 'use_square_pixels', self.set_use_square_pixels)
try:
self.set_resolution(config.resolution)
except ValueError:
config.resolution = self._resolution.get_vapix_representation()
return config
def __try_set_value_from_config(self, config, field, setter):
"""First, try to call `setter(config[field])`, and if this call doesn't succeed. set the field in config to
its value stored in this class.
:param config: The dynamic reconfigure config dictionary.
:type config: dict
:param field: The field name (both in :py:obj:`config` and in :py:obj:`self`).
:type field: basestring
:param setter: The setter to use to set the value.
:type setter: lambda function
"""
try:
setter(config[field])
except ValueError:
            config[field] = getattr(self, '_' + field)  # the stored values use a leading-underscore attribute name
#################################
# DYNAMIC RECONFIGURE CALLBACKS #
#################################
def set_resolution(self, resolution_value):
"""Request a new resolution for the video stream.
        :param resolution_value: A string of the form `width`x`height`, or a :py:class:`VideoResolution` object.
:type resolution_value: basestring|VideoResolution
:raises: :py:exc:`ValueError` if the resolution is unknown/unsupported.
"""
resolution = None
if isinstance(resolution_value, VideoResolution):
resolution = resolution_value
elif isinstance(resolution_value, basestring):
resolution = self._get_resolution_from_param_value(resolution_value)
if resolution is None:
raise ValueError("Unsupported resolution type specified: %r" % resolution_value)
if self._resolution is None or resolution != self._resolution:
self._resolution = resolution
self.video_params_changed = True
# deprecated values
self._width = resolution.get_resolution(self._use_square_pixels)[0]
self._height = resolution.get_resolution(self._use_square_pixels)[1]
def _get_resolution_from_param_value(self, value):
"""Return a :py:class:`VideoResolution` object corresponding to the given video resolution param string.
:param value: Value of the resolution parameter to parse (of form `width`x`height`).
:type value: basestring
:return: The :py:class:`VideoResolution` corresponding to the given resolution param string.
:rtype: :py:class:`VideoResolution`
:raises: :py:exc:`ValueError` if the resolution is unknown/unsupported.
"""
for resolution in self._allowed_resolutions:
if resolution.get_vapix_representation() == value:
return resolution
raise ValueError("%s is not a valid valid resolution." % value)
def find_resolution_by_size(self, width, height):
"""Return a :py:class:`VideoResolution` object with the given dimensions.
        If there are multiple resolutions with the same size, any of them may be returned.
:param width: Image width in pixels. If `None`, resolutions will be matched only by height.
:type width: int|None
:param height: Image height in pixels. If `None`, resolutions will be matched only by width.
:type height: int|None
:return: The corresponding resolution object.
:rtype: :py:class:`VideoResolution`
:raises: :py:exc:`ValueError` if no resolution with the given dimensions can be found.
:raises: :py:exc:`ValueError` if both `width` and `height` are None.
"""
if width is None and height is None:
raise ValueError("Either width or height of the desired resolution must be specified.")
for resolution in self._allowed_resolutions:
size = resolution.get_resolution(use_square_pixels=False)
if (width is None or width == size[0]) and (height is None or height == size[1]):
return resolution
size = resolution.get_resolution(use_square_pixels=True)
if (width is None or width == size[0]) and (height is None or height == size[1]):
return resolution
raise ValueError("Cannot find a supported resolution with dimensions %sx%s" % (width, height))
def _get_allowed_resolutions(self):
"""Return a list of resolutions supported both by the camera.
:return: The supported resolutions list.
:rtype: list of :py:class:`VideoResolution`
"""
camera_resolutions = self._get_resolutions_supported_by_camera()
return camera_resolutions
def _get_resolutions_supported_by_camera(self):
"""Return a list of resolutions supported the camera.
:return: The supported resolutions list.
:rtype: list of :py:class:`VideoResolution`
"""
try:
names = self._api.parse_list_parameter_value(self._api.get_parameter("Properties.Image.Resolution"))
return [VideoResolution.parse_from_vapix_param_value(name, self._api) for name in names]
except (IOError, ValueError):
rospy.logwarn("Could not determine resolutions supported by the camera. Asssuming only CIF.")
return [CIFVideoResolution("CIF", 384, 288)]
def set_compression(self, compression):
"""Request the given compression level for the video stream.
:param compression: Compression of the image (0 - no compression, 100 - max compression).
:type compression: int
:raises: :py:exc:`ValueError` if the given compression level is outside the allowed range.
"""
if compression != self._compression:
self._compression = self.sanitize_compression(compression)
self.video_params_changed = True
@staticmethod
def sanitize_compression(compression):
"""Make sure the given value can be used as a compression level of the video stream.
:param compression: Compression of the image (0 - no compression, 100 - max compression).
:type compression: int
:return: The given compression converted to an int.
:rtype: int
:raises: :py:exc:`ValueError` if the given compression level is outside the allowed range.
"""
compression = int(compression)
if not (0 <= compression <= 100):
raise ValueError("%s is not a valid value for compression." % str(compression))
return compression
def set_fps(self, fps):
"""Request the given compression level for the video stream.
:param fps: The desired frames per second.
:type fps: int
:raises: :py:exc:`ValueError` if the given FPS is outside the allowed range.
"""
if fps != self._fps:
self._fps = self.sanitize_fps(fps)
self.video_params_changed = True
if hasattr(self, "_video_publisher_frequency_diagnostic"):
self._video_publisher_frequency_diagnostic.freq_bound['min'] = self._fps
self._video_publisher_frequency_diagnostic.freq_bound['max'] = self._fps
@staticmethod
def sanitize_fps(fps):
"""Make sure the given value can be used as FPS of the video stream.
:param fps: The desired frames per second.
:type fps: int
:return: The given FPS converted to an int.
:rtype: int
:raises: :py:exc:`ValueError` if the given FPS is outside the allowed range.
"""
fps = int(fps)
if not (1 <= fps <= 30):
raise ValueError("%s is not a valid value for FPS." % str(fps))
return fps
def set_use_color(self, use_color):
"""Request using/not using color in the video stream.
:param use_color: If True, send a color stream, otherwise send only grayscale image.
:type use_color: bool
:raises: :py:exc:`ValueError` if the given argument is not a bool.
"""
if use_color != self._use_color:
self._use_color = self.sanitize_bool(use_color, "use_color")
self.video_params_changed = True
def set_use_square_pixels(self, use_square_pixels):
"""Request using/not using square pixels.
:param use_square_pixels: If True, the resolution will be stretched to match 1:1 pixels.
By default, the pixels have a ratio of 11:12.
:type use_square_pixels: bool
:raises: :py:exc:`ValueError` if the given argument is not a bool.
"""
if use_square_pixels != self._use_square_pixels:
self._use_square_pixels = self.sanitize_bool(use_square_pixels, "use_square_pixels")
self.video_params_changed = True
@staticmethod
def sanitize_bool(value, field_name):
"""Convert the given value to a bool.
:param value: Either True, False,, "1", "0", 1 or 0.
:type value: :py:class:`basestring` | :py:class:`bool` | :py:class:`int`
:param field_name: Name of the field this value belongs to (just for debug messages).
:type field_name: basestring
:return: The bool value of the given value.
:rtype: :py:class:`bool`
:raises: :py:exc:`ValueError` if the given value is not supported in this conversion.
"""
if value not in (True, False, "1", "0", 1, 0):
raise ValueError("%s is not a valid value for %s." % (str(value), field_name))
# bool("0") returns True because it is a nonempty string
if value == "0":
return False
return bool(value)
def _camera_diagnostic_callback(self, diag_message):
assert isinstance(diag_message, DiagnosticStatusWrapper)
diag_message.summary(DiagnosticStatusWrapper.OK, "Video parameters")
diag_message.add("FPS", self._fps)
diag_message.add("Resolution", self._resolution)
diag_message.add("Compression", self._compression)
diag_message.add("Color image", self._use_color)
diag_message.add("Square pixels used", self._use_square_pixels)
class VideoResolution(object):
"""A class representing a video resolution."""
def __init__(self, width, height):
"""Create a representation of the resolution.
:param width: Width of the resolution in pixels.
:type width: int
:param height: Height of the resolution in pixels.
:type height: int
"""
super(VideoResolution, self).__init__()
self.width = int(width)
self.height = int(height)
self.square_pixel_conversion_ratio_width = 12.0 / 11.0
self.square_pixel_conversion_ratio_height = 1
def __str__(self):
return "%dx%d" % (self.width, self.height)
def __repr__(self):
return "VideoResolution(width=%r,height=%r)" % (self.width, self.height)
def __eq__(self, other):
# compare by attribute values
return self.__dict__ == other.__dict__
def __ne__(self, other):
# reuse the former __eq__ definition
return not self == other
def get_resolution(self, use_square_pixels=False):
"""Get the image dimensions corresponding to this resolution.
:param use_square_pixels: Whether to strech the resulting resolution to square pixels.
:type use_square_pixels: bool
:return: A tuple (width, height)
:rtype: tuple
"""
width = self.width
height = self.height
if use_square_pixels:
width = int(math.ceil(self.square_pixel_conversion_ratio_width * self.width))
height = int(math.ceil(self.square_pixel_conversion_ratio_height * self.height))
return width, height
def get_vapix_representation(self):
return "%dx%d" % (self.width, self.height)
@staticmethod
def parse_from_vapix_param_value(value, api):
assert isinstance(value, basestring)
assert isinstance(api, VAPIX)
numeric_regexp = re.compile(r"(\d+)x(\d+)")
match = numeric_regexp.match(value)
if match is not None:
return VideoResolution(int(match.group(1)), int(match.group(2)))
else: # resolution given by CIF name
name = value
width, height = api.resolve_video_resolution_name(name)
return CIFVideoResolution(name, width, height)
class CIFVideoResolution(VideoResolution):
"""A class representing a CIF standard resolution."""
def __init__(self, name, width, height):
"""Create a representation of a CIF resolution.
:param name: CIF standard name of the resolution.
:type name: basestring
:param width: Width of the resolution in pixels.
:type width: int
:param height: Height of the resolution in pixels.
:type height: int
"""
super(CIFVideoResolution, self).__init__(width, height)
self.name = name
def __str__(self):
return "%s (%dx%d)" % (self.name, self.width, self.height)
def __repr__(self):
return "CIFVideoResolution(name=%r,width=%r,height=%r)" % (self.name, self.width, self.height)
def main():
"""Start the ROS driver and ROS node."""
rospy.init_node("axis_driver")
arg_defaults = {
'hostname': '192.168.0.90', # default IP address
'username': None, # default login name
'password': None,
'width': 704,
'height': 576,
'frame_id': 'axis_camera',
'camera_info_url': '',
'use_encrypted_password': False,
'camera_id': 1,
'auto_wakeup_camera': True,
'compression': 0,
'fps': 24,
'use_color': True,
'use_square_pixels': False,
}
args = read_args_with_defaults(arg_defaults)
axis = Axis(**args)
rate = rospy.Rate(1)
while not rospy.is_shutdown():
axis.diagnostic_updater.update()
try:
rate.sleep()
except rospy.ROSTimeMovedBackwardsException:
rospy.logwarn("Detected jump back in time.")
class PausableDiagnosedPublisher(DiagnosedPublisher):
def __init__(self, axis, pub, diag, freq, stamp):
DiagnosedPublisher.__init__(self, pub, diag, freq, stamp)
self._axis = axis
def run(self, stat):
if self._axis._streaming_thread is None or self._axis._streaming_thread.is_paused():
stat.summary(DiagnosticStatusWrapper.OK, "Video not subscribed")
else:
stat = DiagnosedPublisher.run(self, stat)
return stat
def read_args_with_defaults(arg_defaults):
"""Look up parameters starting in the driver's private parameter space, but also searching outer namespaces.
Defining them in a higher namespace allows the axis_ptz.py script to share parameters with the driver."""
args = {}
for name, val in arg_defaults.iteritems():
full_name = rospy.search_param(name)
if full_name is None:
args[name] = val
else:
args[name] = rospy.get_param(full_name, val)
# resolve frame_id with tf_prefix (unless already absolute)
if args['frame_id'][0] != '/': # not absolute?
tf_prefix = rospy.search_param('tf_prefix')
prefix_val = ''
if tf_prefix is not None: # prefix defined?
prefix_val = rospy.get_param(tf_prefix)
if prefix_val[0] != '/': # prefix not absolute?
prefix_val = '/' + prefix_val
args['frame_id'] = prefix_val + '/' + args['frame_id']
return args
if __name__ == "__main__":
main()
|
__init__
|
Create a representation of the resolution.
:param width: Width of the resolution in pixels.
:type width: int
:param height: Height of the resolution in pixels.
:type height: int
|
#!/usr/bin/env python
"""
Axis camera video driver. Inspired by:
https://code.ros.org/svn/wg-ros-pkg/branches/trunk_cturtle/sandbox/axis_camera/axis.py
Communication with the camera is done using the Axis VAPIX API described at
http://www.axis.com/global/en/support/developer-support/vapix
.. note::
This is a major rewrite of the former ros-drivers/axis_camera node, so it contains a (deprecated) backwards
compatibility layer for the previous (non-released) API.
"""
import math
import re
import rospy
from sensor_msgs.msg import CompressedImage, CameraInfo
import camera_info_manager
import dynamic_reconfigure.server
from diagnostic_updater import Updater, DiagnosedPublisher, TimeStampStatusParam, FrequencyStatusParam, \
FunctionDiagnosticTask, DiagnosticStatusWrapper
from axis_camera.cfg import VideoStreamConfig
from axis_camera.srv import TakeSnapshot, TakeSnapshotResponse
from axis_camera.vapix import VAPIX
from axis_camera.video_streaming import ImageStreamingThread
from axis_camera.dynamic_reconfigure_tools import change_enum_items
# BACKWARDS COMPATIBILITY LAYER
StreamThread = ImageStreamingThread # deprecated
class Axis(rospy.SubscribeListener):
"""The ROS-VAPIX interface for video streaming."""
def __init__(self, hostname, username, password, width, height, frame_id, camera_info_url, use_encrypted_password,
camera_id=1, auto_wakeup_camera=True, compression=0, fps=24, use_color=True,
use_square_pixels=False):
"""Create the ROS-VAPIX interface.
:param hostname: Hostname of the camera (without http://, can be an IP address).
:type hostname: basestring
:param username: If login is needed, provide a username here.
:type username: :py:obj:`basestring` | None
:param password: If login is needed, provide a password here.
:type password: :py:obj:`basestring` | None
:param width: Width of the requested video stream in pixels (can be changed later). Must be one of the supported
resolutions. If `None`, the resolution will be chosen by height only. If also `height` is `None`,
then the default camera resolution will be used.
:type width: int|None
:param height: Height of the requested video stream in pixels (can be changed later). Must be one of the
supported resolutions. If `None`, the resolution will be chosen by width only. If also `width` is
`None`, then the default camera resolution will be used.
:type height: int|None
:param frame_id: The ROS TF frame assigned to the camera.
:type frame_id: basestring
:param camera_info_url: The URL pointing to the camera calaibration, if available.
:type camera_info_url: basestring
:param use_encrypted_password: Whether to use Plain HTTP Auth (False) or Digest HTTP Auth (True).
:type use_encrypted_password: bool
:param camera_id: ID (number) of the camera. Can be 1 to 4.
:type camera_id: int
:param auto_wakeup_camera: If True, there will be a wakeup trial after first unsuccessful network command.
:type auto_wakeup_camera: bool
:param compression: Compression of the image (0 - no compression, 100 - max compression).
:type compression: int
:param fps: The desired frames per second.
:type fps: int
:param use_color: If True, send a color stream, otherwise send only grayscale image.
:type use_color: bool
:param use_square_pixels: If True, the resolution will be stretched to match 1:1 pixels.
By default, the pixels have a ratio of 11:12.
:type use_square_pixels: bool
:raises: :py:exc:`ValueError` if the requested resolution (either the `resolution`, or `width`+`height`
is not supported.
"""
# True every time the video parameters have changed and the URL has to be altered (set from other threads).
self.video_params_changed = False
self.__initializing = True
self._hostname = hostname
self._camera_id = camera_id
self.diagnostic_updater = Updater()
self.diagnostic_updater.setHardwareID(hostname)
self._api = None
# autodetect the VAPIX API and connect to it; try it forever
while self._api is None and not rospy.is_shutdown():
try:
self._api = VAPIX.get_api_for_camera(hostname, username, password, camera_id, use_encrypted_password)
except (IOError, ValueError):
rospy.loginfo("Retrying connection to VAPIX on host %s, camera %d in 2 seconds." %
(hostname, camera_id))
rospy.sleep(2)
if rospy.is_shutdown():
return
self._allowed_resolutions = self._get_allowed_resolutions()
rospy.loginfo("The following resolutions are available for camera %d:\n%s" %
(camera_id, "\n".join([str(res) for res in self._allowed_resolutions])))
rospy.set_param("~allowed_resolutions", [res.get_vapix_representation() for res in self._allowed_resolutions])
# Sometimes the camera falls into power saving mode and stops streaming.
# This setting allows the script to try to wake up the camera.
self._auto_wakeup_camera = auto_wakeup_camera
# dynamic-reconfigurable properties - definitions
self._width = None # deprecated
self._height = None # deprecated
self._resolution = None
self._compression = None
self._fps = None
self._use_color = None
self._use_square_pixels = None
# treat empty strings as None in width and height params
width = width if width != "" else None
height = height if height != "" else None
# dynamic-reconfigurable properties - defaults
if width is None and height is None:
# TODO change to perform default resolution detection from VAPIX
self.set_resolution(self._allowed_resolutions[0])
else:
resolution = self.find_resolution_by_size(width, height)
self.set_resolution(resolution.get_vapix_representation())
self.set_compression(compression)
self.set_fps(fps)
self.set_use_color(use_color)
self.set_use_square_pixels(use_square_pixels)
# only advertise the supported resolutions on dynamic reconfigure
change_enum_items(
VideoStreamConfig,
"resolution",
[{
'name': res.name if isinstance(res, CIFVideoResolution) else str(res),
'value': res.get_vapix_representation(),
'description': str(res)
} for res in self._allowed_resolutions],
self._resolution.get_vapix_representation()
)
# dynamic reconfigure server
self._video_stream_param_change_server = dynamic_reconfigure.server.Server(VideoStreamConfig,
self.reconfigure_video)
# camera info setup
self._frame_id = frame_id
self._camera_info_url = camera_info_url
# generate a valid camera name based on the hostname
self._camera_name = camera_info_manager.genCameraName(self._hostname)
self._camera_info = camera_info_manager.CameraInfoManager(cname=self._camera_name, url=self._camera_info_url)
self._camera_info.loadCameraInfo() # required before getCameraInfo()
# the thread used for streaming images (is instantiated when the first image subscriber subscribes)
self._streaming_thread = None
# the publishers are started/stopped lazily in peer_subscribe/peer_unsubscribe
self._video_publisher_frequency_diagnostic = FrequencyStatusParam({'min': self._fps, 'max': self._fps})
self._video_publisher = PausableDiagnosedPublisher(
self,
rospy.Publisher("image_raw/compressed", CompressedImage, self, queue_size=100),
self.diagnostic_updater, self._video_publisher_frequency_diagnostic, TimeStampStatusParam()
)
self._camera_info_publisher = PausableDiagnosedPublisher(
self,
rospy.Publisher("camera_info", CameraInfo, self, queue_size=100),
self.diagnostic_updater, self._video_publisher_frequency_diagnostic, TimeStampStatusParam()
)
self._snapshot_server = rospy.Service("take_snapshot", TakeSnapshot, self.take_snapshot)
self.diagnostic_updater.add(FunctionDiagnosticTask("Camera parameters", self._camera_diagnostic_callback))
# BACKWARDS COMPATIBILITY LAYER
self.username = username # deprecated
self.password = password # deprecated
self.use_encrypted_password = use_encrypted_password # deprecated
self.st = None # deprecated
self.pub = self._video_publisher # deprecated
self.caminfo_pub = self._camera_info_publisher # deprecated
self.__initializing = False
def __str__(self):
(width, height) = self._resolution.get_resolution(self._use_square_pixels)
return 'Axis driver on host %s, camera %d (%dx%d px @ %d FPS)' % \
(self._hostname, self._api.camera_id, width, height, self._fps)
def peer_subscribe(self, topic_name, topic_publish, peer_publish):
"""Lazy-start the image-publisher."""
if self._streaming_thread is None:
self._streaming_thread = ImageStreamingThread(self)
self._streaming_thread.start()
else:
self._streaming_thread.resume()
def peer_unsubscribe(self, topic_name, num_peers):
"""Lazy-stop the image-publisher when nobody is interested"""
if num_peers == 0:
self._streaming_thread.pause()
def take_snapshot(self, request):
"""Retrieve a snapshot from the camera.
:param request: The service request.
:type request: :py:class:`axis_camera.srv.TakeSnapshotRequest`
:return: The response containing the image.
:rtype: :py:class:`axis_camera.srv.TakeSnapshotResponse`
:raises: :py:exc:`IOError`, :py:exc:`urllib2.URLError`
"""
image_data = self._api.take_snapshot()
image = CompressedImage()
image.header.stamp = rospy.Time.now()
image.header.frame_id = self._frame_id
image.format = "jpeg"
image.data = image_data
response = TakeSnapshotResponse()
response.image = image
return response
def reconfigure_video(self, config, level):
"""Dynamic reconfigure callback for video parameters.
:param config: The requested configuration.
:type config: dict
:param level: Unused here.
:type level: int
:return: The config corresponding to what was really achieved.
:rtype: dict
"""
if self.__initializing:
# in the initialization phase, we want to give precedence to the values given to the constructor
config.compression = self._compression
config.fps = self._fps
config.use_color = self._use_color
config.use_square_pixels = self._use_square_pixels
config.resolution = self._resolution.get_vapix_representation()
else:
self.__try_set_value_from_config(config, 'compression', self.set_compression)
self.__try_set_value_from_config(config, 'fps', self.set_fps)
self.__try_set_value_from_config(config, 'use_color', self.set_use_color)
self.__try_set_value_from_config(config, 'use_square_pixels', self.set_use_square_pixels)
try:
self.set_resolution(config.resolution)
except ValueError:
config.resolution = self._resolution.get_vapix_representation()
return config
def __try_set_value_from_config(self, config, field, setter):
"""First, try to call `setter(config[field])`, and if this call doesn't succeed. set the field in config to
its value stored in this class.
:param config: The dynamic reconfigure config dictionary.
:type config: dict
:param field: The field name (both in :py:obj:`config` and in :py:obj:`self`).
:type field: basestring
:param setter: The setter to use to set the value.
:type setter: lambda function
"""
try:
setter(config[field])
except ValueError:
config[field] = getattr(self, field)
#################################
# DYNAMIC RECONFIGURE CALLBACKS #
#################################
def set_resolution(self, resolution_value):
"""Request a new resolution for the video stream.
:param resolution_value: The string of type `width`x`height` or a :py:class:`VideoResolution` object.
:type resolution_value: basestring|VideoResolution
:raises: :py:exc:`ValueError` if the resolution is unknown/unsupported.
"""
resolution = None
if isinstance(resolution_value, VideoResolution):
resolution = resolution_value
elif isinstance(resolution_value, basestring):
resolution = self._get_resolution_from_param_value(resolution_value)
if resolution is None:
raise ValueError("Unsupported resolution type specified: %r" % resolution_value)
if self._resolution is None or resolution != self._resolution:
self._resolution = resolution
self.video_params_changed = True
# deprecated values
self._width = resolution.get_resolution(self._use_square_pixels)[0]
self._height = resolution.get_resolution(self._use_square_pixels)[1]
def _get_resolution_from_param_value(self, value):
"""Return a :py:class:`VideoResolution` object corresponding to the given video resolution param string.
:param value: Value of the resolution parameter to parse (of form `width`x`height`).
:type value: basestring
:return: The :py:class:`VideoResolution` corresponding to the given resolution param string.
:rtype: :py:class:`VideoResolution`
:raises: :py:exc:`ValueError` if the resolution is unknown/unsupported.
"""
for resolution in self._allowed_resolutions:
if resolution.get_vapix_representation() == value:
return resolution
raise ValueError("%s is not a valid valid resolution." % value)
def find_resolution_by_size(self, width, height):
"""Return a :py:class:`VideoResolution` object with the given dimensions.
If there are more resolutions with the same size, any of them may be returned.
:param width: Image width in pixels. If `None`, resolutions will be matched only by height.
:type width: int|None
:param height: Image height in pixels. If `None`, resolutions will be matched only by width.
:type height: int|None
:return: The corresponding resolution object.
:rtype: :py:class:`VideoResolution`
:raises: :py:exc:`ValueError` if no resolution with the given dimensions can be found.
:raises: :py:exc:`ValueError` if both `width` and `height` are None.
"""
if width is None and height is None:
raise ValueError("Either width or height of the desired resolution must be specified.")
for resolution in self._allowed_resolutions:
size = resolution.get_resolution(use_square_pixels=False)
if (width is None or width == size[0]) and (height is None or height == size[1]):
return resolution
size = resolution.get_resolution(use_square_pixels=True)
if (width is None or width == size[0]) and (height is None or height == size[1]):
return resolution
raise ValueError("Cannot find a supported resolution with dimensions %sx%s" % (width, height))
def _get_allowed_resolutions(self):
"""Return a list of resolutions supported both by the camera.
:return: The supported resolutions list.
:rtype: list of :py:class:`VideoResolution`
"""
camera_resolutions = self._get_resolutions_supported_by_camera()
return camera_resolutions
def _get_resolutions_supported_by_camera(self):
"""Return a list of resolutions supported the camera.
:return: The supported resolutions list.
:rtype: list of :py:class:`VideoResolution`
"""
try:
names = self._api.parse_list_parameter_value(self._api.get_parameter("Properties.Image.Resolution"))
return [VideoResolution.parse_from_vapix_param_value(name, self._api) for name in names]
except (IOError, ValueError):
rospy.logwarn("Could not determine resolutions supported by the camera. Asssuming only CIF.")
return [CIFVideoResolution("CIF", 384, 288)]
def set_compression(self, compression):
"""Request the given compression level for the video stream.
:param compression: Compression of the image (0 - no compression, 100 - max compression).
:type compression: int
:raises: :py:exc:`ValueError` if the given compression level is outside the allowed range.
"""
if compression != self._compression:
self._compression = self.sanitize_compression(compression)
self.video_params_changed = True
@staticmethod
def sanitize_compression(compression):
"""Make sure the given value can be used as a compression level of the video stream.
:param compression: Compression of the image (0 - no compression, 100 - max compression).
:type compression: int
:return: The given compression converted to an int.
:rtype: int
:raises: :py:exc:`ValueError` if the given compression level is outside the allowed range.
"""
compression = int(compression)
if not (0 <= compression <= 100):
raise ValueError("%s is not a valid value for compression." % str(compression))
return compression
def set_fps(self, fps):
"""Request the given compression level for the video stream.
:param fps: The desired frames per second.
:type fps: int
:raises: :py:exc:`ValueError` if the given FPS is outside the allowed range.
"""
if fps != self._fps:
self._fps = self.sanitize_fps(fps)
self.video_params_changed = True
if hasattr(self, "_video_publisher_frequency_diagnostic"):
self._video_publisher_frequency_diagnostic.freq_bound['min'] = self._fps
self._video_publisher_frequency_diagnostic.freq_bound['max'] = self._fps
@staticmethod
def sanitize_fps(fps):
"""Make sure the given value can be used as FPS of the video stream.
:param fps: The desired frames per second.
:type fps: int
:return: The given FPS converted to an int.
:rtype: int
:raises: :py:exc:`ValueError` if the given FPS is outside the allowed range.
"""
fps = int(fps)
if not (1 <= fps <= 30):
raise ValueError("%s is not a valid value for FPS." % str(fps))
return fps
def set_use_color(self, use_color):
"""Request using/not using color in the video stream.
:param use_color: If True, send a color stream, otherwise send only grayscale image.
:type use_color: bool
:raises: :py:exc:`ValueError` if the given argument is not a bool.
"""
if use_color != self._use_color:
self._use_color = self.sanitize_bool(use_color, "use_color")
self.video_params_changed = True
def set_use_square_pixels(self, use_square_pixels):
"""Request using/not using square pixels.
:param use_square_pixels: If True, the resolution will be stretched to match 1:1 pixels.
By default, the pixels have a ratio of 11:12.
:type use_square_pixels: bool
:raises: :py:exc:`ValueError` if the given argument is not a bool.
"""
if use_square_pixels != self._use_square_pixels:
self._use_square_pixels = self.sanitize_bool(use_square_pixels, "use_square_pixels")
self.video_params_changed = True
@staticmethod
def sanitize_bool(value, field_name):
"""Convert the given value to a bool.
:param value: Either True, False,, "1", "0", 1 or 0.
:type value: :py:class:`basestring` | :py:class:`bool` | :py:class:`int`
:param field_name: Name of the field this value belongs to (just for debug messages).
:type field_name: basestring
:return: The bool value of the given value.
:rtype: :py:class:`bool`
:raises: :py:exc:`ValueError` if the given value is not supported in this conversion.
"""
if value not in (True, False, "1", "0", 1, 0):
raise ValueError("%s is not a valid value for %s." % (str(value), field_name))
# bool("0") returns True because it is a nonempty string
if value == "0":
return False
return bool(value)
def _camera_diagnostic_callback(self, diag_message):
assert isinstance(diag_message, DiagnosticStatusWrapper)
diag_message.summary(DiagnosticStatusWrapper.OK, "Video parameters")
diag_message.add("FPS", self._fps)
diag_message.add("Resolution", self._resolution)
diag_message.add("Compression", self._compression)
diag_message.add("Color image", self._use_color)
diag_message.add("Square pixels used", self._use_square_pixels)
class VideoResolution(object):
"""A class representing a video resolution."""
# MASKED: __init__ function (lines 492-506)
def __str__(self):
return "%dx%d" % (self.width, self.height)
def __repr__(self):
return "VideoResolution(width=%r,height=%r)" % (self.width, self.height)
def __eq__(self, other):
# compare by attribute values
return self.__dict__ == other.__dict__
def __ne__(self, other):
# reuse the former __eq__ definition
return not self == other
def get_resolution(self, use_square_pixels=False):
"""Get the image dimensions corresponding to this resolution.
:param use_square_pixels: Whether to strech the resulting resolution to square pixels.
:type use_square_pixels: bool
:return: A tuple (width, height)
:rtype: tuple
"""
width = self.width
height = self.height
if use_square_pixels:
width = int(math.ceil(self.square_pixel_conversion_ratio_width * self.width))
height = int(math.ceil(self.square_pixel_conversion_ratio_height * self.height))
return width, height
def get_vapix_representation(self):
return "%dx%d" % (self.width, self.height)
@staticmethod
def parse_from_vapix_param_value(value, api):
assert isinstance(value, basestring)
assert isinstance(api, VAPIX)
numeric_regexp = re.compile(r"(\d+)x(\d+)")
match = numeric_regexp.match(value)
if match is not None:
return VideoResolution(int(match.group(1)), int(match.group(2)))
else: # resolution given by CIF name
name = value
width, height = api.resolve_video_resolution_name(name)
return CIFVideoResolution(name, width, height)
class CIFVideoResolution(VideoResolution):
"""A class representing a CIF standard resolution."""
def __init__(self, name, width, height):
"""Create a representation of a CIF resolution.
:param name: CIF standard name of the resolution.
:type name: basestring
:param width: Width of the resolution in pixels.
:type width: int
:param height: Height of the resolution in pixels.
:type height: int
"""
super(CIFVideoResolution, self).__init__(width, height)
self.name = name
def __str__(self):
return "%s (%dx%d)" % (self.name, self.width, self.height)
def __repr__(self):
return "CIFVideoResolution(name=%r,width=%r,height=%r)" % (self.name, self.width, self.height)
def main():
"""Start the ROS driver and ROS node."""
rospy.init_node("axis_driver")
arg_defaults = {
'hostname': '192.168.0.90', # default IP address
'username': None, # default login name
'password': None,
'width': 704,
'height': 576,
'frame_id': 'axis_camera',
'camera_info_url': '',
'use_encrypted_password': False,
'camera_id': 1,
'auto_wakeup_camera': True,
'compression': 0,
'fps': 24,
'use_color': True,
'use_square_pixels': False,
}
args = read_args_with_defaults(arg_defaults)
axis = Axis(**args)
rate = rospy.Rate(1)
while not rospy.is_shutdown():
axis.diagnostic_updater.update()
try:
rate.sleep()
except rospy.ROSTimeMovedBackwardsException:
rospy.logwarn("Detected jump back in time.")
class PausableDiagnosedPublisher(DiagnosedPublisher):
def __init__(self, axis, pub, diag, freq, stamp):
DiagnosedPublisher.__init__(self, pub, diag, freq, stamp)
self._axis = axis
def run(self, stat):
if self._axis._streaming_thread is None or self._axis._streaming_thread.is_paused():
stat.summary(DiagnosticStatusWrapper.OK, "Video not subscribed")
else:
stat = DiagnosedPublisher.run(self, stat)
return stat
def read_args_with_defaults(arg_defaults):
"""Look up parameters starting in the driver's private parameter space, but also searching outer namespaces.
Defining them in a higher namespace allows the axis_ptz.py script to share parameters with the driver."""
args = {}
for name, val in arg_defaults.iteritems():
full_name = rospy.search_param(name)
if full_name is None:
args[name] = val
else:
args[name] = rospy.get_param(full_name, val)
# resolve frame_id with tf_prefix (unless already absolute)
if args['frame_id'][0] != '/': # not absolute?
tf_prefix = rospy.search_param('tf_prefix')
prefix_val = ''
if tf_prefix is not None: # prefix defined?
prefix_val = rospy.get_param(tf_prefix)
if prefix_val[0] != '/': # prefix not absolute?
prefix_val = '/' + prefix_val
args['frame_id'] = prefix_val + '/' + args['frame_id']
return args
if __name__ == "__main__":
main()
|
def __init__(self, width, height):
"""Create a representation of the resolution.
:param width: Width of the resolution in pixels.
:type width: int
:param height: Height of the resolution in pixels.
:type height: int
"""
super(VideoResolution, self).__init__()
self.width = int(width)
self.height = int(height)
self.square_pixel_conversion_ratio_width = 12.0 / 11.0
self.square_pixel_conversion_ratio_height = 1
| 492 | 506 |
#!/usr/bin/env python
"""
Axis camera video driver. Inspired by:
https://code.ros.org/svn/wg-ros-pkg/branches/trunk_cturtle/sandbox/axis_camera/axis.py
Communication with the camera is done using the Axis VAPIX API described at
http://www.axis.com/global/en/support/developer-support/vapix
.. note::
This is a major rewrite of the former ros-drivers/axis_camera node, so it contains a (deprecated) backwards
compatibility layer for the previous (non-released) API.
"""
import math
import re
import rospy
from sensor_msgs.msg import CompressedImage, CameraInfo
import camera_info_manager
import dynamic_reconfigure.server
from diagnostic_updater import Updater, DiagnosedPublisher, TimeStampStatusParam, FrequencyStatusParam, \
FunctionDiagnosticTask, DiagnosticStatusWrapper
from axis_camera.cfg import VideoStreamConfig
from axis_camera.srv import TakeSnapshot, TakeSnapshotResponse
from axis_camera.vapix import VAPIX
from axis_camera.video_streaming import ImageStreamingThread
from axis_camera.dynamic_reconfigure_tools import change_enum_items
# BACKWARDS COMPATIBILITY LAYER
StreamThread = ImageStreamingThread # deprecated
class Axis(rospy.SubscribeListener):
"""The ROS-VAPIX interface for video streaming."""
def __init__(self, hostname, username, password, width, height, frame_id, camera_info_url, use_encrypted_password,
camera_id=1, auto_wakeup_camera=True, compression=0, fps=24, use_color=True,
use_square_pixels=False):
"""Create the ROS-VAPIX interface.
:param hostname: Hostname of the camera (without http://, can be an IP address).
:type hostname: basestring
:param username: If login is needed, provide a username here.
:type username: :py:obj:`basestring` | None
:param password: If login is needed, provide a password here.
:type password: :py:obj:`basestring` | None
:param width: Width of the requested video stream in pixels (can be changed later). Must be one of the supported
resolutions. If `None`, the resolution will be chosen by height only. If also `height` is `None`,
then the default camera resolution will be used.
:type width: int|None
:param height: Height of the requested video stream in pixels (can be changed later). Must be one of the
supported resolutions. If `None`, the resolution will be chosen by width only. If also `width` is
`None`, then the default camera resolution will be used.
:type height: int|None
:param frame_id: The ROS TF frame assigned to the camera.
:type frame_id: basestring
:param camera_info_url: The URL pointing to the camera calaibration, if available.
:type camera_info_url: basestring
:param use_encrypted_password: Whether to use Plain HTTP Auth (False) or Digest HTTP Auth (True).
:type use_encrypted_password: bool
:param camera_id: ID (number) of the camera. Can be 1 to 4.
:type camera_id: int
:param auto_wakeup_camera: If True, there will be a wakeup trial after first unsuccessful network command.
:type auto_wakeup_camera: bool
:param compression: Compression of the image (0 - no compression, 100 - max compression).
:type compression: int
:param fps: The desired frames per second.
:type fps: int
:param use_color: If True, send a color stream, otherwise send only grayscale image.
:type use_color: bool
:param use_square_pixels: If True, the resolution will be stretched to match 1:1 pixels.
By default, the pixels have a ratio of 11:12.
:type use_square_pixels: bool
:raises: :py:exc:`ValueError` if the requested resolution (either the `resolution`, or `width`+`height`
is not supported.
"""
# True every time the video parameters have changed and the URL has to be altered (set from other threads).
self.video_params_changed = False
self.__initializing = True
self._hostname = hostname
self._camera_id = camera_id
self.diagnostic_updater = Updater()
self.diagnostic_updater.setHardwareID(hostname)
self._api = None
# autodetect the VAPIX API and connect to it; try it forever
while self._api is None and not rospy.is_shutdown():
try:
self._api = VAPIX.get_api_for_camera(hostname, username, password, camera_id, use_encrypted_password)
except (IOError, ValueError):
rospy.loginfo("Retrying connection to VAPIX on host %s, camera %d in 2 seconds." %
(hostname, camera_id))
rospy.sleep(2)
if rospy.is_shutdown():
return
self._allowed_resolutions = self._get_allowed_resolutions()
rospy.loginfo("The following resolutions are available for camera %d:\n%s" %
(camera_id, "\n".join([str(res) for res in self._allowed_resolutions])))
rospy.set_param("~allowed_resolutions", [res.get_vapix_representation() for res in self._allowed_resolutions])
# Sometimes the camera falls into power saving mode and stops streaming.
# This setting allows the script to try to wake up the camera.
self._auto_wakeup_camera = auto_wakeup_camera
# dynamic-reconfigurable properties - definitions
self._width = None # deprecated
self._height = None # deprecated
self._resolution = None
self._compression = None
self._fps = None
self._use_color = None
self._use_square_pixels = None
# treat empty strings as None in width and height params
width = width if width != "" else None
height = height if height != "" else None
# dynamic-reconfigurable properties - defaults
if width is None and height is None:
# TODO change to perform default resolution detection from VAPIX
self.set_resolution(self._allowed_resolutions[0])
else:
resolution = self.find_resolution_by_size(width, height)
self.set_resolution(resolution.get_vapix_representation())
self.set_compression(compression)
self.set_fps(fps)
self.set_use_color(use_color)
self.set_use_square_pixels(use_square_pixels)
# only advertise the supported resolutions on dynamic reconfigure
change_enum_items(
VideoStreamConfig,
"resolution",
[{
'name': res.name if isinstance(res, CIFVideoResolution) else str(res),
'value': res.get_vapix_representation(),
'description': str(res)
} for res in self._allowed_resolutions],
self._resolution.get_vapix_representation()
)
# dynamic reconfigure server
self._video_stream_param_change_server = dynamic_reconfigure.server.Server(VideoStreamConfig,
self.reconfigure_video)
# camera info setup
self._frame_id = frame_id
self._camera_info_url = camera_info_url
# generate a valid camera name based on the hostname
self._camera_name = camera_info_manager.genCameraName(self._hostname)
self._camera_info = camera_info_manager.CameraInfoManager(cname=self._camera_name, url=self._camera_info_url)
self._camera_info.loadCameraInfo() # required before getCameraInfo()
# the thread used for streaming images (is instantiated when the first image subscriber subscribes)
self._streaming_thread = None
# the publishers are started/stopped lazily in peer_subscribe/peer_unsubscribe
self._video_publisher_frequency_diagnostic = FrequencyStatusParam({'min': self._fps, 'max': self._fps})
self._video_publisher = PausableDiagnosedPublisher(
self,
rospy.Publisher("image_raw/compressed", CompressedImage, self, queue_size=100),
self.diagnostic_updater, self._video_publisher_frequency_diagnostic, TimeStampStatusParam()
)
self._camera_info_publisher = PausableDiagnosedPublisher(
self,
rospy.Publisher("camera_info", CameraInfo, self, queue_size=100),
self.diagnostic_updater, self._video_publisher_frequency_diagnostic, TimeStampStatusParam()
)
self._snapshot_server = rospy.Service("take_snapshot", TakeSnapshot, self.take_snapshot)
self.diagnostic_updater.add(FunctionDiagnosticTask("Camera parameters", self._camera_diagnostic_callback))
# BACKWARDS COMPATIBILITY LAYER
self.username = username # deprecated
self.password = password # deprecated
self.use_encrypted_password = use_encrypted_password # deprecated
self.st = None # deprecated
self.pub = self._video_publisher # deprecated
self.caminfo_pub = self._camera_info_publisher # deprecated
self.__initializing = False
def __str__(self):
(width, height) = self._resolution.get_resolution(self._use_square_pixels)
return 'Axis driver on host %s, camera %d (%dx%d px @ %d FPS)' % \
(self._hostname, self._api.camera_id, width, height, self._fps)
def peer_subscribe(self, topic_name, topic_publish, peer_publish):
"""Lazy-start the image-publisher."""
if self._streaming_thread is None:
self._streaming_thread = ImageStreamingThread(self)
self._streaming_thread.start()
else:
self._streaming_thread.resume()
def peer_unsubscribe(self, topic_name, num_peers):
"""Lazy-stop the image-publisher when nobody is interested"""
if num_peers == 0:
self._streaming_thread.pause()
def take_snapshot(self, request):
"""Retrieve a snapshot from the camera.
:param request: The service request.
:type request: :py:class:`axis_camera.srv.TakeSnapshotRequest`
:return: The response containing the image.
:rtype: :py:class:`axis_camera.srv.TakeSnapshotResponse`
:raises: :py:exc:`IOError`, :py:exc:`urllib2.URLError`
"""
image_data = self._api.take_snapshot()
image = CompressedImage()
image.header.stamp = rospy.Time.now()
image.header.frame_id = self._frame_id
image.format = "jpeg"
image.data = image_data
response = TakeSnapshotResponse()
response.image = image
return response
def reconfigure_video(self, config, level):
"""Dynamic reconfigure callback for video parameters.
:param config: The requested configuration.
:type config: dict
:param level: Unused here.
:type level: int
:return: The config corresponding to what was really achieved.
:rtype: dict
"""
if self.__initializing:
# in the initialization phase, we want to give precedence to the values given to the constructor
config.compression = self._compression
config.fps = self._fps
config.use_color = self._use_color
config.use_square_pixels = self._use_square_pixels
config.resolution = self._resolution.get_vapix_representation()
else:
self.__try_set_value_from_config(config, 'compression', self.set_compression)
self.__try_set_value_from_config(config, 'fps', self.set_fps)
self.__try_set_value_from_config(config, 'use_color', self.set_use_color)
self.__try_set_value_from_config(config, 'use_square_pixels', self.set_use_square_pixels)
try:
self.set_resolution(config.resolution)
except ValueError:
config.resolution = self._resolution.get_vapix_representation()
return config
def __try_set_value_from_config(self, config, field, setter):
"""First, try to call `setter(config[field])`, and if this call doesn't succeed. set the field in config to
its value stored in this class.
:param config: The dynamic reconfigure config dictionary.
:type config: dict
:param field: The field name (both in :py:obj:`config` and in :py:obj:`self`).
:type field: basestring
:param setter: The setter to use to set the value.
:type setter: lambda function
"""
try:
setter(config[field])
except ValueError:
config[field] = getattr(self, field)
#################################
# DYNAMIC RECONFIGURE CALLBACKS #
#################################
def set_resolution(self, resolution_value):
"""Request a new resolution for the video stream.
:param resolution_value: The string of type `width`x`height` or a :py:class:`VideoResolution` object.
:type resolution_value: basestring|VideoResolution
:raises: :py:exc:`ValueError` if the resolution is unknown/unsupported.
"""
resolution = None
if isinstance(resolution_value, VideoResolution):
resolution = resolution_value
elif isinstance(resolution_value, basestring):
resolution = self._get_resolution_from_param_value(resolution_value)
if resolution is None:
raise ValueError("Unsupported resolution type specified: %r" % resolution_value)
if self._resolution is None or resolution != self._resolution:
self._resolution = resolution
self.video_params_changed = True
# deprecated values
self._width = resolution.get_resolution(self._use_square_pixels)[0]
self._height = resolution.get_resolution(self._use_square_pixels)[1]
def _get_resolution_from_param_value(self, value):
"""Return a :py:class:`VideoResolution` object corresponding to the given video resolution param string.
:param value: Value of the resolution parameter to parse (of form `width`x`height`).
:type value: basestring
:return: The :py:class:`VideoResolution` corresponding to the given resolution param string.
:rtype: :py:class:`VideoResolution`
:raises: :py:exc:`ValueError` if the resolution is unknown/unsupported.
"""
for resolution in self._allowed_resolutions:
if resolution.get_vapix_representation() == value:
return resolution
raise ValueError("%s is not a valid valid resolution." % value)
def find_resolution_by_size(self, width, height):
"""Return a :py:class:`VideoResolution` object with the given dimensions.
If there are more resolutions with the same size, any of them may be returned.
:param width: Image width in pixels. If `None`, resolutions will be matched only by height.
:type width: int|None
:param height: Image height in pixels. If `None`, resolutions will be matched only by width.
:type height: int|None
:return: The corresponding resolution object.
:rtype: :py:class:`VideoResolution`
:raises: :py:exc:`ValueError` if no resolution with the given dimensions can be found.
:raises: :py:exc:`ValueError` if both `width` and `height` are None.
"""
if width is None and height is None:
raise ValueError("Either width or height of the desired resolution must be specified.")
for resolution in self._allowed_resolutions:
size = resolution.get_resolution(use_square_pixels=False)
if (width is None or width == size[0]) and (height is None or height == size[1]):
return resolution
size = resolution.get_resolution(use_square_pixels=True)
if (width is None or width == size[0]) and (height is None or height == size[1]):
return resolution
raise ValueError("Cannot find a supported resolution with dimensions %sx%s" % (width, height))
def _get_allowed_resolutions(self):
"""Return a list of resolutions supported both by the camera.
:return: The supported resolutions list.
:rtype: list of :py:class:`VideoResolution`
"""
camera_resolutions = self._get_resolutions_supported_by_camera()
return camera_resolutions
def _get_resolutions_supported_by_camera(self):
"""Return a list of resolutions supported the camera.
:return: The supported resolutions list.
:rtype: list of :py:class:`VideoResolution`
"""
try:
names = self._api.parse_list_parameter_value(self._api.get_parameter("Properties.Image.Resolution"))
return [VideoResolution.parse_from_vapix_param_value(name, self._api) for name in names]
except (IOError, ValueError):
rospy.logwarn("Could not determine resolutions supported by the camera. Asssuming only CIF.")
return [CIFVideoResolution("CIF", 384, 288)]
def set_compression(self, compression):
"""Request the given compression level for the video stream.
:param compression: Compression of the image (0 - no compression, 100 - max compression).
:type compression: int
:raises: :py:exc:`ValueError` if the given compression level is outside the allowed range.
"""
if compression != self._compression:
self._compression = self.sanitize_compression(compression)
self.video_params_changed = True
@staticmethod
def sanitize_compression(compression):
"""Make sure the given value can be used as a compression level of the video stream.
:param compression: Compression of the image (0 - no compression, 100 - max compression).
:type compression: int
:return: The given compression converted to an int.
:rtype: int
:raises: :py:exc:`ValueError` if the given compression level is outside the allowed range.
"""
compression = int(compression)
if not (0 <= compression <= 100):
raise ValueError("%s is not a valid value for compression." % str(compression))
return compression
def set_fps(self, fps):
"""Request the given compression level for the video stream.
:param fps: The desired frames per second.
:type fps: int
:raises: :py:exc:`ValueError` if the given FPS is outside the allowed range.
"""
if fps != self._fps:
self._fps = self.sanitize_fps(fps)
self.video_params_changed = True
if hasattr(self, "_video_publisher_frequency_diagnostic"):
self._video_publisher_frequency_diagnostic.freq_bound['min'] = self._fps
self._video_publisher_frequency_diagnostic.freq_bound['max'] = self._fps
@staticmethod
def sanitize_fps(fps):
"""Make sure the given value can be used as FPS of the video stream.
:param fps: The desired frames per second.
:type fps: int
:return: The given FPS converted to an int.
:rtype: int
:raises: :py:exc:`ValueError` if the given FPS is outside the allowed range.
"""
fps = int(fps)
if not (1 <= fps <= 30):
raise ValueError("%s is not a valid value for FPS." % str(fps))
return fps
def set_use_color(self, use_color):
"""Request using/not using color in the video stream.
:param use_color: If True, send a color stream, otherwise send only grayscale image.
:type use_color: bool
:raises: :py:exc:`ValueError` if the given argument is not a bool.
"""
if use_color != self._use_color:
self._use_color = self.sanitize_bool(use_color, "use_color")
self.video_params_changed = True
def set_use_square_pixels(self, use_square_pixels):
"""Request using/not using square pixels.
:param use_square_pixels: If True, the resolution will be stretched to match 1:1 pixels.
By default, the pixels have a ratio of 11:12.
:type use_square_pixels: bool
:raises: :py:exc:`ValueError` if the given argument is not a bool.
"""
if use_square_pixels != self._use_square_pixels:
self._use_square_pixels = self.sanitize_bool(use_square_pixels, "use_square_pixels")
self.video_params_changed = True
@staticmethod
def sanitize_bool(value, field_name):
"""Convert the given value to a bool.
:param value: Either True, False,, "1", "0", 1 or 0.
:type value: :py:class:`basestring` | :py:class:`bool` | :py:class:`int`
:param field_name: Name of the field this value belongs to (just for debug messages).
:type field_name: basestring
:return: The bool value of the given value.
:rtype: :py:class:`bool`
:raises: :py:exc:`ValueError` if the given value is not supported in this conversion.
"""
if value not in (True, False, "1", "0", 1, 0):
raise ValueError("%s is not a valid value for %s." % (str(value), field_name))
# bool("0") returns True because it is a nonempty string
if value == "0":
return False
return bool(value)
def _camera_diagnostic_callback(self, diag_message):
assert isinstance(diag_message, DiagnosticStatusWrapper)
diag_message.summary(DiagnosticStatusWrapper.OK, "Video parameters")
diag_message.add("FPS", self._fps)
diag_message.add("Resolution", self._resolution)
diag_message.add("Compression", self._compression)
diag_message.add("Color image", self._use_color)
diag_message.add("Square pixels used", self._use_square_pixels)
class VideoResolution(object):
"""A class representing a video resolution."""
def __init__(self, width, height):
"""Create a representation of the resolution.
:param width: Width of the resolution in pixels.
:type width: int
:param height: Height of the resolution in pixels.
:type height: int
"""
super(VideoResolution, self).__init__()
self.width = int(width)
self.height = int(height)
self.square_pixel_conversion_ratio_width = 12.0 / 11.0
self.square_pixel_conversion_ratio_height = 1
def __str__(self):
return "%dx%d" % (self.width, self.height)
def __repr__(self):
return "VideoResolution(width=%r,height=%r)" % (self.width, self.height)
def __eq__(self, other):
# compare by attribute values
return self.__dict__ == other.__dict__
def __ne__(self, other):
# reuse the former __eq__ definition
return not self == other
def get_resolution(self, use_square_pixels=False):
"""Get the image dimensions corresponding to this resolution.
:param use_square_pixels: Whether to strech the resulting resolution to square pixels.
:type use_square_pixels: bool
:return: A tuple (width, height)
:rtype: tuple
"""
width = self.width
height = self.height
if use_square_pixels:
width = int(math.ceil(self.square_pixel_conversion_ratio_width * self.width))
height = int(math.ceil(self.square_pixel_conversion_ratio_height * self.height))
return width, height
def get_vapix_representation(self):
return "%dx%d" % (self.width, self.height)
@staticmethod
def parse_from_vapix_param_value(value, api):
assert isinstance(value, basestring)
assert isinstance(api, VAPIX)
numeric_regexp = re.compile(r"(\d+)x(\d+)")
match = numeric_regexp.match(value)
if match is not None:
return VideoResolution(int(match.group(1)), int(match.group(2)))
else: # resolution given by CIF name
name = value
width, height = api.resolve_video_resolution_name(name)
return CIFVideoResolution(name, width, height)
class CIFVideoResolution(VideoResolution):
"""A class representing a CIF standard resolution."""
def __init__(self, name, width, height):
"""Create a representation of a CIF resolution.
:param name: CIF standard name of the resolution.
:type name: basestring
:param width: Width of the resolution in pixels.
:type width: int
:param height: Height of the resolution in pixels.
:type height: int
"""
super(CIFVideoResolution, self).__init__(width, height)
self.name = name
def __str__(self):
return "%s (%dx%d)" % (self.name, self.width, self.height)
def __repr__(self):
return "CIFVideoResolution(name=%r,width=%r,height=%r)" % (self.name, self.width, self.height)
def main():
"""Start the ROS driver and ROS node."""
rospy.init_node("axis_driver")
arg_defaults = {
'hostname': '192.168.0.90', # default IP address
'username': None, # default login name
'password': None,
'width': 704,
'height': 576,
'frame_id': 'axis_camera',
'camera_info_url': '',
'use_encrypted_password': False,
'camera_id': 1,
'auto_wakeup_camera': True,
'compression': 0,
'fps': 24,
'use_color': True,
'use_square_pixels': False,
}
args = read_args_with_defaults(arg_defaults)
axis = Axis(**args)
rate = rospy.Rate(1)
while not rospy.is_shutdown():
axis.diagnostic_updater.update()
try:
rate.sleep()
except rospy.ROSTimeMovedBackwardsException:
rospy.logwarn("Detected jump back in time.")
class PausableDiagnosedPublisher(DiagnosedPublisher):
def __init__(self, axis, pub, diag, freq, stamp):
DiagnosedPublisher.__init__(self, pub, diag, freq, stamp)
self._axis = axis
def run(self, stat):
if self._axis._streaming_thread is None or self._axis._streaming_thread.is_paused():
stat.summary(DiagnosticStatusWrapper.OK, "Video not subscribed")
else:
stat = DiagnosedPublisher.run(self, stat)
return stat
def read_args_with_defaults(arg_defaults):
"""Look up parameters starting in the driver's private parameter space, but also searching outer namespaces.
Defining them in a higher namespace allows the axis_ptz.py script to share parameters with the driver."""
args = {}
for name, val in arg_defaults.iteritems():
full_name = rospy.search_param(name)
if full_name is None:
args[name] = val
else:
args[name] = rospy.get_param(full_name, val)
# resolve frame_id with tf_prefix (unless already absolute)
if args['frame_id'][0] != '/': # not absolute?
tf_prefix = rospy.search_param('tf_prefix')
prefix_val = ''
if tf_prefix is not None: # prefix defined?
prefix_val = rospy.get_param(tf_prefix)
if prefix_val[0] != '/': # prefix not absolute?
prefix_val = '/' + prefix_val
args['frame_id'] = prefix_val + '/' + args['frame_id']
return args
if __name__ == "__main__":
main()
|
set_application
|
Set ``CommandLineInterface`` instance for this connection.
(This can be replaced any time.)
:param cli: CommandLineInterface instance.
:param callback: Callable that takes the result of the CLI.
|
"""
Telnet server.
Example usage::
class MyTelnetApplication(TelnetApplication):
def client_connected(self, telnet_connection):
# Set CLI with simple prompt.
telnet_connection.set_application(
telnet_connection.create_prompt_application(...))
def handle_command(self, telnet_connection, document):
# When the client enters a command, just reply.
telnet_connection.send('You said: %r\n\n' % document.text)
...
a = MyTelnetApplication()
TelnetServer(application=a, host='127.0.0.1', port=23).run()
"""
from __future__ import unicode_literals
import socket
import select
import threading
import os
import fcntl
from six import int2byte, text_type, binary_type
from codecs import getincrementaldecoder
from prompt_toolkit.enums import DEFAULT_BUFFER
from prompt_toolkit.eventloop.base import EventLoop
from prompt_toolkit.interface import CommandLineInterface, Application
from prompt_toolkit.layout.screen import Size
from prompt_toolkit.shortcuts import create_prompt_application
from prompt_toolkit.terminal.vt100_input import InputStream
from prompt_toolkit.terminal.vt100_output import Vt100_Output
from .log import logger
from .protocol import IAC, DO, LINEMODE, SB, MODE, SE, WILL, ECHO, NAWS, SUPPRESS_GO_AHEAD
from .protocol import TelnetProtocolParser
from .application import TelnetApplication
__all__ = (
'TelnetServer',
)
def _initialize_telnet(connection):
logger.info('Initializing telnet connection')
# Iac Do Linemode
connection.send(IAC + DO + LINEMODE)
# Suppress Go Ahead. (This seems important for Putty to do correct echoing.)
# This will allow bi-directional operation.
connection.send(IAC + WILL + SUPPRESS_GO_AHEAD)
# Iac sb
connection.send(IAC + SB + LINEMODE + MODE + int2byte(0) + IAC + SE)
# IAC Will Echo
connection.send(IAC + WILL + ECHO)
# Negotiate window size
connection.send(IAC + DO + NAWS)
class _ConnectionStdout(object):
"""
Wrapper around socket which provides `write` and `flush` methods for the
Vt100_Output output.
"""
def __init__(self, connection, encoding):
self._encoding = encoding
self._connection = connection
self._buffer = []
def write(self, data):
assert isinstance(data, text_type)
self._buffer.append(data.encode(self._encoding))
self.flush()
def flush(self):
try:
self._connection.send(b''.join(self._buffer))
except socket.error as e:
logger.error("Couldn't send data over socket: %s" % e)
self._buffer = []
class TelnetConnection(object):
"""
Class that represents one Telnet connection.
"""
def __init__(self, conn, addr, application, server, encoding):
assert isinstance(addr, tuple) # (addr, port) tuple
assert isinstance(application, TelnetApplication)
assert isinstance(server, TelnetServer)
assert isinstance(encoding, text_type) # e.g. 'utf-8'
self.conn = conn
self.addr = addr
self.application = application
self.closed = False
self.handling_command = True
self.server = server
self.encoding = encoding
self.callback = None # Function that handles the CLI result.
# Create "Output" object.
self.size = Size(rows=40, columns=79)
# Initialize.
_initialize_telnet(conn)
# Create output.
def get_size():
return self.size
self.stdout = _ConnectionStdout(conn, encoding=encoding)
self.vt100_output = Vt100_Output(self.stdout, get_size, write_binary=False)
# Create an eventloop (adaptor) for the CommandLineInterface.
self.eventloop = _TelnetEventLoopInterface(server)
# Set default CommandLineInterface.
self.set_application(create_prompt_application())
# Call client_connected
application.client_connected(self)
# Draw for the first time.
self.handling_command = False
self.cli._redraw()
# MASKED: set_application function (lines 139-185)
def feed(self, data):
"""
Handler for incoming data. (Called by TelnetServer.)
"""
assert isinstance(data, binary_type)
self.parser.feed(data)
# Render again.
self.cli._redraw()
# When a return value has been set (enter was pressed), handle command.
if self.cli.is_returning:
try:
return_value = self.cli.return_value()
except (EOFError, KeyboardInterrupt) as e:
# Control-D or Control-C was pressed.
logger.info('%s, closing connection.', type(e).__name__)
self.close()
return
# Handle CLI command
self._handle_command(return_value)
def _handle_command(self, command):
"""
Handle command. This will run in a separate thread, in order not
to block the event loop.
"""
logger.info('Handle command %r', command)
def in_executor():
self.handling_command = True
try:
if self.callback is not None:
self.callback(self, command)
finally:
self.server.call_from_executor(done)
def done():
self.handling_command = False
# Reset state and draw again. (If the connection is still open --
# the application could have called TelnetConnection.close().)
if not self.closed:
self.cli.reset()
self.cli.buffers[DEFAULT_BUFFER].reset()
self.cli.renderer.request_absolute_cursor_position()
self.vt100_output.flush()
self.cli._redraw()
self.server.run_in_executor(in_executor)
def erase_screen(self):
"""
Erase output screen.
"""
self.vt100_output.erase_screen()
self.vt100_output.cursor_goto(0, 0)
self.vt100_output.flush()
def send(self, data):
"""
Send text to the client.
"""
assert isinstance(data, text_type)
# When data is sent back to the client, we should replace the line
# endings. (We didn't allocate a real pseudo terminal, and the telnet
# connection is raw, so we are responsible for inserting \r.)
self.stdout.write(data.replace('\n', '\r\n'))
self.stdout.flush()
def close(self):
"""
Close the connection.
"""
self.application.client_leaving(self)
self.conn.close()
self.closed = True
class _TelnetEventLoopInterface(EventLoop):
"""
Eventloop object to be assigned to `CommandLineInterface`.
"""
def __init__(self, server):
self._server = server
def close(self):
" Ignore. "
def stop(self):
" Ignore. "
def run_in_executor(self, callback):
self._server.run_in_executor(callback)
def call_from_executor(self, callback, _max_postpone_until=None):
self._server.call_from_executor(callback)
def add_reader(self, fd, callback):
raise NotImplementedError
def remove_reader(self, fd):
raise NotImplementedError
class TelnetServer(object):
"""
Telnet server implementation.
"""
def __init__(self, host='127.0.0.1', port=23, application=None, encoding='utf-8'):
assert isinstance(host, text_type)
assert isinstance(port, int)
assert isinstance(application, TelnetApplication)
assert isinstance(encoding, text_type)
self.host = host
self.port = port
self.application = application
self.encoding = encoding
self.connections = set()
self._calls_from_executor = []
# Create a pipe for inter thread communication.
self._schedule_pipe = os.pipe()
fcntl.fcntl(self._schedule_pipe[0], fcntl.F_SETFL, os.O_NONBLOCK)
@classmethod
def create_socket(cls, host, port):
# Create and bind socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((host, port))
s.listen(4)
return s
def run_in_executor(self, callback):
threading.Thread(target=callback).start()
def call_from_executor(self, callback):
self._calls_from_executor.append(callback)
if self._schedule_pipe:
os.write(self._schedule_pipe[1], b'x')
def _process_callbacks(self):
"""
Process callbacks from `call_from_executor` in eventloop.
"""
# Flush all the pipe content.
os.read(self._schedule_pipe[0], 1024)
# Process calls from executor.
calls_from_executor, self._calls_from_executor = self._calls_from_executor, []
for c in calls_from_executor:
c()
def run(self):
"""
Run the eventloop for the telnet server.
"""
listen_socket = self.create_socket(self.host, self.port)
logger.info('Listening for telnet connections on %s port %r', self.host, self.port)
try:
while True:
# Remove closed connections.
self.connections = set([c for c in self.connections if not c.closed])
# Ignore connections handling commands.
connections = set([c for c in self.connections if not c.handling_command])
# Wait for next event.
read_list = (
[listen_socket, self._schedule_pipe[0]] +
[c.conn for c in connections])
read, _, _ = select.select(read_list, [], [])
for s in read:
# When the socket itself is ready, accept a new connection.
if s == listen_socket:
self._accept(listen_socket)
# If we receive something on our "call_from_executor" pipe, process
# these callbacks in a thread safe way.
elif s == self._schedule_pipe[0]:
self._process_callbacks()
# Handle incoming data on socket.
else:
self._handle_incoming_data(s)
finally:
listen_socket.close()
def _accept(self, listen_socket):
"""
Accept new incoming connection.
"""
conn, addr = listen_socket.accept()
connection = TelnetConnection(conn, addr, self.application, self, encoding=self.encoding)
self.connections.add(connection)
logger.info('New connection %r %r', *addr)
def _handle_incoming_data(self, conn):
"""
Handle incoming data on socket.
"""
connection = [c for c in self.connections if c.conn == conn][0]
data = conn.recv(1024)
if data:
connection.feed(data)
else:
self.connections.remove(connection)
|
def set_application(self, app, callback=None):
"""
Set ``CommandLineInterface`` instance for this connection.
(This can be replaced any time.)
:param app: ``Application`` instance.
:param callback: Callable that takes the result of the CLI.
"""
assert isinstance(app, Application)
assert callback is None or callable(callback)
self.cli = CommandLineInterface(
application=app,
eventloop=self.eventloop,
output=self.vt100_output)
self.callback = callback
# Create a parser, and parser callbacks.
cb = self.cli.create_eventloop_callbacks()
inputstream = InputStream(cb.feed_key)
# Input decoder for stdin. (Required when working with multibyte
# characters, like Chinese input.)
stdin_decoder_cls = getincrementaldecoder(self.encoding)
stdin_decoder = [stdin_decoder_cls()] # nonlocal
# Tell the CLI that it's running. We don't start it through the run()
# call, but will still want _redraw() to work.
self.cli._is_running = True
def data_received(data):
""" TelnetProtocolParser 'data_received' callback """
assert isinstance(data, binary_type)
try:
result = stdin_decoder[0].decode(data)
inputstream.feed(result)
except UnicodeDecodeError:
stdin_decoder[0] = stdin_decoder_cls()
return ''
def size_received(rows, columns):
""" TelnetProtocolParser 'size_received' callback """
self.size = Size(rows=rows, columns=columns)
cb.terminal_size_changed()
self.parser = TelnetProtocolParser(data_received, size_received)
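# --- Illustrative usage sketch ---------------------------------------------
# One way a TelnetApplication subclass might install a prompt plus a result
# callback via set_application(). 'ExampleApp' and 'on_result' are
# hypothetical names; the stubs below only exist to keep the sketch
# self-contained.
class ExampleApp(TelnetApplication):
    def client_connected(self, telnet_connection):
        def on_result(connection, document):
            # 'document' is the CLI return value; echo its text back.
            connection.send('You typed: %s\n' % document.text)
        telnet_connection.set_application(
            create_prompt_application(message=u'> '),
            callback=on_result)
    def client_leaving(self, telnet_connection):
        pass  # nothing to clean up in this sketch
    def handle_command(self, telnet_connection, document):
        pass  # results are delivered through the callback above instead
# TelnetServer(application=ExampleApp(), port=2323).run()  # e.g. a non-privileged port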
| 139 | 185 |
"""
Telnet server.
Example usage::
class MyTelnetApplication(TelnetApplication):
def client_connected(self, telnet_connection):
# Set CLI with simple prompt.
telnet_connection.set_application(
telnet_connection.create_prompt_application(...))
def handle_command(self, telnet_connection, document):
# When the client enters a command, just reply.
telnet_connection.send('You said: %r\n\n' % document.text)
...
a = MyTelnetApplication()
TelnetServer(application=a, host='127.0.0.1', port=23).run()
"""
from __future__ import unicode_literals
import socket
import select
import threading
import os
import fcntl
from six import int2byte, text_type, binary_type
from codecs import getincrementaldecoder
from prompt_toolkit.enums import DEFAULT_BUFFER
from prompt_toolkit.eventloop.base import EventLoop
from prompt_toolkit.interface import CommandLineInterface, Application
from prompt_toolkit.layout.screen import Size
from prompt_toolkit.shortcuts import create_prompt_application
from prompt_toolkit.terminal.vt100_input import InputStream
from prompt_toolkit.terminal.vt100_output import Vt100_Output
from .log import logger
from .protocol import IAC, DO, LINEMODE, SB, MODE, SE, WILL, ECHO, NAWS, SUPPRESS_GO_AHEAD
from .protocol import TelnetProtocolParser
from .application import TelnetApplication
__all__ = (
'TelnetServer',
)
def _initialize_telnet(connection):
logger.info('Initializing telnet connection')
# Iac Do Linemode
connection.send(IAC + DO + LINEMODE)
# Suppress Go Ahead. (This seems important for PuTTY to do correct echoing.)
# This will allow bi-directional operation.
connection.send(IAC + WILL + SUPPRESS_GO_AHEAD)
# Iac sb
connection.send(IAC + SB + LINEMODE + MODE + int2byte(0) + IAC + SE)
# IAC Will Echo
connection.send(IAC + WILL + ECHO)
# Negotiate window size
connection.send(IAC + DO + NAWS)
class _ConnectionStdout(object):
"""
Wrapper around socket which provides `write` and `flush` methods for the
Vt100_Output output.
"""
def __init__(self, connection, encoding):
self._encoding = encoding
self._connection = connection
self._buffer = []
def write(self, data):
assert isinstance(data, text_type)
self._buffer.append(data.encode(self._encoding))
self.flush()
def flush(self):
try:
self._connection.send(b''.join(self._buffer))
except socket.error as e:
logger.error("Couldn't send data over socket: %s" % e)
self._buffer = []
class TelnetConnection(object):
"""
Class that represents one Telnet connection.
"""
def __init__(self, conn, addr, application, server, encoding):
assert isinstance(addr, tuple) # (addr, port) tuple
assert isinstance(application, TelnetApplication)
assert isinstance(server, TelnetServer)
assert isinstance(encoding, text_type) # e.g. 'utf-8'
self.conn = conn
self.addr = addr
self.application = application
self.closed = False
self.handling_command = True
self.server = server
self.encoding = encoding
self.callback = None # Function that handles the CLI result.
# Create "Output" object.
self.size = Size(rows=40, columns=79)
# Initialize.
_initialize_telnet(conn)
# Create output.
def get_size():
return self.size
self.stdout = _ConnectionStdout(conn, encoding=encoding)
self.vt100_output = Vt100_Output(self.stdout, get_size, write_binary=False)
# Create an eventloop (adaptor) for the CommandLineInterface.
self.eventloop = _TelnetEventLoopInterface(server)
# Set default CommandLineInterface.
self.set_application(create_prompt_application())
# Call client_connected
application.client_connected(self)
# Draw for the first time.
self.handling_command = False
self.cli._redraw()
def set_application(self, app, callback=None):
"""
Set ``CommandLineInterface`` instance for this connection.
(This can be replaced any time.)
:param app: ``Application`` instance.
:param callback: Callable that takes the result of the CLI.
"""
assert isinstance(app, Application)
assert callback is None or callable(callback)
self.cli = CommandLineInterface(
application=app,
eventloop=self.eventloop,
output=self.vt100_output)
self.callback = callback
# Create a parser, and parser callbacks.
cb = self.cli.create_eventloop_callbacks()
inputstream = InputStream(cb.feed_key)
# Input decoder for stdin. (Required when working with multibyte
# characters, like Chinese input.)
stdin_decoder_cls = getincrementaldecoder(self.encoding)
stdin_decoder = [stdin_decoder_cls()] # nonlocal
# Tell the CLI that it's running. We don't start it through the run()
# call, but will still want _redraw() to work.
self.cli._is_running = True
def data_received(data):
""" TelnetProtocolParser 'data_received' callback """
assert isinstance(data, binary_type)
try:
result = stdin_decoder[0].decode(data)
inputstream.feed(result)
except UnicodeDecodeError:
stdin_decoder[0] = stdin_decoder_cls()
return ''
def size_received(rows, columns):
""" TelnetProtocolParser 'size_received' callback """
self.size = Size(rows=rows, columns=columns)
cb.terminal_size_changed()
self.parser = TelnetProtocolParser(data_received, size_received)
def feed(self, data):
"""
Handler for incoming data. (Called by TelnetServer.)
"""
assert isinstance(data, binary_type)
self.parser.feed(data)
# Render again.
self.cli._redraw()
# When a return value has been set (enter was pressed), handle command.
if self.cli.is_returning:
try:
return_value = self.cli.return_value()
except (EOFError, KeyboardInterrupt) as e:
# Control-D or Control-C was pressed.
logger.info('%s, closing connection.', type(e).__name__)
self.close()
return
# Handle CLI command
self._handle_command(return_value)
def _handle_command(self, command):
"""
Handle command. This will run in a separate thread, in order not
to block the event loop.
"""
logger.info('Handle command %r', command)
def in_executor():
self.handling_command = True
try:
if self.callback is not None:
self.callback(self, command)
finally:
self.server.call_from_executor(done)
def done():
self.handling_command = False
# Reset state and draw again. (If the connection is still open --
# the application could have called TelnetConnection.close().)
if not self.closed:
self.cli.reset()
self.cli.buffers[DEFAULT_BUFFER].reset()
self.cli.renderer.request_absolute_cursor_position()
self.vt100_output.flush()
self.cli._redraw()
self.server.run_in_executor(in_executor)
def erase_screen(self):
"""
Erase output screen.
"""
self.vt100_output.erase_screen()
self.vt100_output.cursor_goto(0, 0)
self.vt100_output.flush()
def send(self, data):
"""
Send text to the client.
"""
assert isinstance(data, text_type)
# When data is sent back to the client, we should replace the line
# endings. (We didn't allocate a real pseudo terminal, and the telnet
# connection is raw, so we are responsible for inserting \r.)
self.stdout.write(data.replace('\n', '\r\n'))
self.stdout.flush()
def close(self):
"""
Close the connection.
"""
self.application.client_leaving(self)
self.conn.close()
self.closed = True
class _TelnetEventLoopInterface(EventLoop):
"""
Eventloop object to be assigned to `CommandLineInterface`.
"""
def __init__(self, server):
self._server = server
def close(self):
" Ignore. "
def stop(self):
" Ignore. "
def run_in_executor(self, callback):
self._server.run_in_executor(callback)
def call_from_executor(self, callback, _max_postpone_until=None):
self._server.call_from_executor(callback)
def add_reader(self, fd, callback):
raise NotImplementedError
def remove_reader(self, fd):
raise NotImplementedError
class TelnetServer(object):
"""
Telnet server implementation.
"""
def __init__(self, host='127.0.0.1', port=23, application=None, encoding='utf-8'):
assert isinstance(host, text_type)
assert isinstance(port, int)
assert isinstance(application, TelnetApplication)
assert isinstance(encoding, text_type)
self.host = host
self.port = port
self.application = application
self.encoding = encoding
self.connections = set()
self._calls_from_executor = []
# Create a pipe for inter thread communication.
self._schedule_pipe = os.pipe()
fcntl.fcntl(self._schedule_pipe[0], fcntl.F_SETFL, os.O_NONBLOCK)
@classmethod
def create_socket(cls, host, port):
# Create and bind socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((host, port))
s.listen(4)
return s
def run_in_executor(self, callback):
threading.Thread(target=callback).start()
def call_from_executor(self, callback):
self._calls_from_executor.append(callback)
if self._schedule_pipe:
os.write(self._schedule_pipe[1], b'x')
def _process_callbacks(self):
"""
Process callbacks from `call_from_executor` in eventloop.
"""
# Flush all the pipe content.
os.read(self._schedule_pipe[0], 1024)
# Process calls from executor.
calls_from_executor, self._calls_from_executor = self._calls_from_executor, []
for c in calls_from_executor:
c()
def run(self):
"""
Run the eventloop for the telnet server.
"""
listen_socket = self.create_socket(self.host, self.port)
logger.info('Listening for telnet connections on %s port %r', self.host, self.port)
try:
while True:
# Remove closed connections.
self.connections = set([c for c in self.connections if not c.closed])
# Ignore connections handling commands.
connections = set([c for c in self.connections if not c.handling_command])
# Wait for next event.
read_list = (
[listen_socket, self._schedule_pipe[0]] +
[c.conn for c in connections])
read, _, _ = select.select(read_list, [], [])
for s in read:
# When the socket itself is ready, accept a new connection.
if s == listen_socket:
self._accept(listen_socket)
# If we receive something on our "call_from_executor" pipe, process
# these callbacks in a thread safe way.
elif s == self._schedule_pipe[0]:
self._process_callbacks()
# Handle incoming data on socket.
else:
self._handle_incoming_data(s)
finally:
listen_socket.close()
def _accept(self, listen_socket):
"""
Accept new incoming connection.
"""
conn, addr = listen_socket.accept()
connection = TelnetConnection(conn, addr, self.application, self, encoding=self.encoding)
self.connections.add(connection)
logger.info('New connection %r %r', *addr)
def _handle_incoming_data(self, conn):
"""
Handle incoming data on socket.
"""
connection = [c for c in self.connections if c.conn == conn][0]
data = conn.recv(1024)
if data:
connection.feed(data)
else:
self.connections.remove(connection)
|
display_auth_cache
|
Writes to the screen the state of the authentication cache. (For debugging
authentication issues.) BEWARE: DO NOT email the output of this command!!!
You must keep the tokens secure. Treat them as passwords.
|
#!/usr/bin/env python
# Copyright 2015 Coursera
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Coursera's asynchronous grader command line SDK.
You may install it from source, or via pip.
"""
from courseraprogramming.commands import oauth2
import requests
import logging
import time
import sys
def check_auth(args):
"""
Checks courseraprogramming's connectivity to the coursera.org API servers
"""
oauth2_instance = oauth2.build_oauth2(args)
auth = oauth2_instance.build_authorizer()
my_profile_url = (
'https://api.coursera.org/api/externalBasicProfiles.v1?'
'q=me&fields=name'
)
r = requests.get(my_profile_url, auth=auth)
if r.status_code != 200:
logging.error('Received response code %s from the basic profile API.',
r.status_code)
logging.debug('Response body:\n%s', r.text)
sys.exit(1)
try:
external_id = r.json()['elements'][0]['id']
except:
logging.error(
'Could not parse the external id out of the response body %s',
r.text)
external_id = None
try:
name = r.json()['elements'][0]['name']
except:
logging.error(
'Could not parse the name out of the response body %s',
r.text)
name = None
if not args.quiet or args.quiet == 0:
print('Name: %s' % name)
print('External ID: %s' % external_id)
if name is None or external_id is None:
sys.exit(1)
# MASKED: display_auth_cache function (lines 70-93)
def parser(subparsers):
"Build an argparse argument parser to parse the command line."
# create the parser for the configure subcommand. (authentication / etc.)
parser_config = subparsers.add_parser(
'configure',
help='Configure %(prog)s for operation!')
config_subparsers = parser_config.add_subparsers()
# Local subsubcommand of the grade subcommand
parser_check_auth = config_subparsers.add_parser(
'check-auth',
help=check_auth.__doc__)
parser_check_auth.set_defaults(func=check_auth)
parser_local_cache = config_subparsers.add_parser(
'display-auth-cache',
help=display_auth_cache.__doc__)
parser_local_cache.set_defaults(func=display_auth_cache)
parser_local_cache.add_argument(
'--no-truncate',
action='store_true',
help='Do not truncate the keys [DANGER!!]')
return parser_config
|
def display_auth_cache(args):
'''
Writes to the screen the state of the authentication cache. (For debugging
authentication issues.) BEWARE: DO NOT email the output of this command!!!
You must keep the tokens secure. Treat them as passwords.
'''
oauth2_instance = oauth2.build_oauth2(args)
if not args.quiet or args.quiet == 0:
token = oauth2_instance.token_cache['token']
if not args.no_truncate and token is not None:
token = token[:10] + '...'
print("Auth token: %s" % token)
expires_time = oauth2_instance.token_cache['expires']
expires_in = int((expires_time - time.time()) * 10) / 10.0
print("Auth token expires in: %s seconds." % expires_in)
if 'refresh' in oauth2_instance.token_cache:
refresh = oauth2_instance.token_cache['refresh']
if not args.no_truncate and refresh is not None:
refresh = refresh[:10] + '...'
print("Refresh token: %s" % refresh)
else:
print("No refresh token found.")
| 70 | 93 |
#!/usr/bin/env python
# Copyright 2015 Coursera
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Coursera's asynchronous grader command line SDK.
You may install it from source, or via pip.
"""
from courseraprogramming.commands import oauth2
import requests
import logging
import time
import sys
def check_auth(args):
"""
Checks courseraprogramming's connectivity to the coursera.org API servers
"""
oauth2_instance = oauth2.build_oauth2(args)
auth = oauth2_instance.build_authorizer()
my_profile_url = (
'https://api.coursera.org/api/externalBasicProfiles.v1?'
'q=me&fields=name'
)
r = requests.get(my_profile_url, auth=auth)
if r.status_code != 200:
logging.error('Received response code %s from the basic profile API.',
r.status_code)
logging.debug('Response body:\n%s', r.text)
sys.exit(1)
try:
external_id = r.json()['elements'][0]['id']
except:
logging.error(
'Could not parse the external id out of the response body %s',
r.text)
external_id = None
try:
name = r.json()['elements'][0]['name']
except:
logging.error(
'Could not parse the name out of the response body %s',
r.text)
name = None
if not args.quiet or args.quiet == 0:
print('Name: %s' % name)
print('External ID: %s' % external_id)
if name is None or external_id is None:
sys.exit(1)
def display_auth_cache(args):
'''
Writes to the screen the state of the authentication cache. (For debugging
authentication issues.) BEWARE: DO NOT email the output of this command!!!
You must keep the tokens secure. Treat them as passwords.
'''
oauth2_instance = oauth2.build_oauth2(args)
if not args.quiet or args.quiet == 0:
token = oauth2_instance.token_cache['token']
if not args.no_truncate and token is not None:
token = token[:10] + '...'
print("Auth token: %s" % token)
expires_time = oauth2_instance.token_cache['expires']
expires_in = int((expires_time - time.time()) * 10) / 10.0
print("Auth token expires in: %s seconds." % expires_in)
if 'refresh' in oauth2_instance.token_cache:
refresh = oauth2_instance.token_cache['refresh']
if not args.no_truncate and refresh is not None:
refresh = refresh[:10] + '...'
print("Refresh token: %s" % refresh)
else:
print("No refresh token found.")
def parser(subparsers):
"Build an argparse argument parser to parse the command line."
# create the parser for the configure subcommand. (authentication / etc.)
parser_config = subparsers.add_parser(
'configure',
help='Configure %(prog)s for operation!')
config_subparsers = parser_config.add_subparsers()
# Local subsubcommand of the grade subcommand
parser_check_auth = config_subparsers.add_parser(
'check-auth',
help=check_auth.__doc__)
parser_check_auth.set_defaults(func=check_auth)
parser_local_cache = config_subparsers.add_parser(
'display-auth-cache',
help=display_auth_cache.__doc__)
parser_local_cache.set_defaults(func=display_auth_cache)
parser_local_cache.add_argument(
'--no-truncate',
action='store_true',
help='Do not truncate the keys [DANGER!!]')
return parser_config
|
_log_ref
|
Log a reference to an in-memory object.
Return True if this object is new and was assigned
a new ID. Otherwise return False.
|
# Copyright (C) 2008 John Paulett (john -at- paulett.org)
# Copyright (C) 2009-2018 David Aguilar (davvid -at- gmail.com)
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
from __future__ import absolute_import, division, unicode_literals
import decimal
import warnings
import sys
import types
from itertools import chain, islice
from . import compat
from . import util
from . import tags
from . import handlers
from .backend import json
from .compat import numeric_types, string_types, PY3, PY2
def encode(
value,
unpicklable=True,
make_refs=True,
keys=False,
max_depth=None,
reset=True,
backend=None,
warn=False,
context=None,
max_iter=None,
use_decimal=False,
numeric_keys=False,
use_base85=False,
fail_safe=None,
indent=None,
separators=None,
):
"""Return a JSON formatted representation of value, a Python object.
:param unpicklable: If set to False then the output will not contain the
information necessary to turn the JSON data back into Python objects,
but a simpler JSON stream is produced.
:param max_depth: If set to a non-negative integer then jsonpickle will
not recurse deeper than 'max_depth' steps into the object. Anything
deeper than 'max_depth' is represented using a Python repr() of the
object.
:param make_refs: If set to False jsonpickle's referencing support is
disabled. Objects that are id()-identical won't be preserved across
encode()/decode(), but the resulting JSON stream will be conceptually
simpler. jsonpickle detects cyclical objects and will break the cycle
by calling repr() instead of recursing when make_refs is set False.
:param keys: If set to True then jsonpickle will encode non-string
dictionary keys instead of coercing them into strings via `repr()`.
This is typically what you want if you need to support integers or
other non-string objects as dictionary keys.
:param numeric_keys: Only use this option if the backend supports integer
dict keys natively. This flag tells jsonpickle to leave numeric keys
as-is rather than conforming them to json-friendly strings.
Using ``keys=True`` is the typical solution for integer keys, so only
use this if you have a specific use case where you want to allow the
backend to handle serialization of numeric dict keys.
:param warn: If set to True then jsonpickle will warn when it
returns None for an object which it cannot pickle
(e.g. file descriptors).
:param max_iter: If set to a non-negative integer then jsonpickle will
consume at most `max_iter` items when pickling iterators.
:param use_decimal: If set to True jsonpickle will allow Decimal
instances to pass-through, with the assumption that the simplejson
backend will be used in `use_decimal` mode. In order to use this mode
you will need to configure simplejson::
jsonpickle.set_encoder_options('simplejson',
use_decimal=True, sort_keys=True)
jsonpickle.set_decoder_options('simplejson',
use_decimal=True)
jsonpickle.set_preferred_backend('simplejson')
NOTE: A side-effect of the above settings is that float values will be
converted to Decimal when converting to json.
:param use_base85:
If possible, use base85 to encode binary data. Base85 bloats binary data
by 1/4 as opposed to base64, which expands it by 1/3. This argument is
ignored on Python 2 because it doesn't support it.
:param fail_safe: If set to a function, exceptions are ignored when pickling,
and if an exception happens the function is called and its return value
is used as the value for the object that caused the error.
:param indent: When `indent` is a non-negative integer, then JSON array
elements and object members will be pretty-printed with that indent
level. An indent level of 0 will only insert newlines. ``None`` is
the most compact representation. Since the default item separator is
``(', ', ': ')``, the output might include trailing whitespace when
``indent`` is specified. You can use ``separators=(',', ': ')`` to
avoid this. This value is passed directly to the active JSON backend
library and not used by jsonpickle directly.
:param separators:
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')``
separators. ``(',', ':')`` is the most compact JSON representation.
This value is passed directly to the active JSON backend library and
not used by jsonpickle directly.
>>> encode('my string') == '"my string"'
True
>>> encode(36) == '36'
True
>>> encode({'foo': True}) == '{"foo": true}'
True
>>> encode({'foo': [1, 2, [3, 4]]}, max_depth=1)
'{"foo": "[1, 2, [3, 4]]"}'
"""
backend = backend or json
context = context or Pickler(
unpicklable=unpicklable,
make_refs=make_refs,
keys=keys,
backend=backend,
max_depth=max_depth,
warn=warn,
max_iter=max_iter,
numeric_keys=numeric_keys,
use_decimal=use_decimal,
use_base85=use_base85,
fail_safe=fail_safe,
)
return backend.encode(
context.flatten(value, reset=reset), indent=indent, separators=separators
)
class Pickler(object):
def __init__(
self,
unpicklable=True,
make_refs=True,
max_depth=None,
backend=None,
keys=False,
warn=False,
max_iter=None,
numeric_keys=False,
use_decimal=False,
use_base85=False,
fail_safe=None,
):
self.unpicklable = unpicklable
self.make_refs = make_refs
self.backend = backend or json
self.keys = keys
self.warn = warn
self.numeric_keys = numeric_keys
self.use_base85 = use_base85 and (not PY2)
# The current recursion depth
self._depth = -1
# The maximal recursion depth
self._max_depth = max_depth
# Maps id(obj) to reference IDs
self._objs = {}
# Avoids garbage collection
self._seen = []
# maximum number of items to take from a pickled iterator
self._max_iter = max_iter
# Whether to allow decimals to pass-through
self._use_decimal = use_decimal
if self.use_base85:
self._bytes_tag = tags.B85
self._bytes_encoder = util.b85encode
else:
self._bytes_tag = tags.B64
self._bytes_encoder = util.b64encode
# ignore exceptions
self.fail_safe = fail_safe
def reset(self):
self._objs = {}
self._depth = -1
self._seen = []
def _push(self):
"""Steps down one level in the namespace."""
self._depth += 1
def _pop(self, value):
"""Step up one level in the namespace and return the value.
If we're at the root, reset the pickler's state.
"""
self._depth -= 1
if self._depth == -1:
self.reset()
return value
# MASKED: _log_ref function (lines 196-207)
def _mkref(self, obj):
"""
Log a reference to an in-memory object, and return
if that object should be considered newly logged.
"""
is_new = self._log_ref(obj)
# Pretend the object is new
pretend_new = not self.unpicklable or not self.make_refs
return pretend_new or is_new
def _getref(self, obj):
return {tags.ID: self._objs.get(id(obj))}
def flatten(self, obj, reset=True):
"""Takes an object and returns a JSON-safe representation of it.
Simply returns any of the basic builtin datatypes
>>> p = Pickler()
>>> p.flatten('hello world') == 'hello world'
True
>>> p.flatten(49)
49
>>> p.flatten(350.0)
350.0
>>> p.flatten(True)
True
>>> p.flatten(False)
False
>>> r = p.flatten(None)
>>> r is None
True
>>> p.flatten(False)
False
>>> p.flatten([1, 2, 3, 4])
[1, 2, 3, 4]
>>> p.flatten((1,2,))[tags.TUPLE]
[1, 2]
>>> p.flatten({'key': 'value'}) == {'key': 'value'}
True
"""
if reset:
self.reset()
return self._flatten(obj)
def _flatten(self, obj):
#########################################
# if obj is nonrecursive return immediately
# for performance reasons we don't want to do recursive checks
if PY2 and isinstance(obj, types.FileType):
return self._flatten_file(obj)
if util.is_bytes(obj):
return self._flatten_bytestring(obj)
if util.is_primitive(obj):
return obj
# Decimal is a primitive when use_decimal is True
if self._use_decimal and isinstance(obj, decimal.Decimal):
return obj
#########################################
self._push()
return self._pop(self._flatten_obj(obj))
def _max_reached(self):
return self._depth == self._max_depth
def _flatten_obj(self, obj):
self._seen.append(obj)
max_reached = self._max_reached()
try:
in_cycle = _in_cycle(obj, self._objs, max_reached, self.make_refs)
if in_cycle:
# break the cycle
flatten_func = repr
else:
flatten_func = self._get_flattener(obj)
if flatten_func is None:
self._pickle_warning(obj)
return None
return flatten_func(obj)
except (KeyboardInterrupt, SystemExit) as e:
raise e
except Exception as e:
if self.fail_safe is None:
raise e
else:
return self.fail_safe(e)
def _list_recurse(self, obj):
return [self._flatten(v) for v in obj]
def _get_flattener(self, obj):
list_recurse = self._list_recurse
if util.is_list(obj):
if self._mkref(obj):
return list_recurse
else:
self._push()
return self._getref
# We handle tuples and sets by encoding them in a "(tuple|set)dict"
if util.is_tuple(obj):
if not self.unpicklable:
return list_recurse
return lambda obj: {tags.TUPLE: [self._flatten(v) for v in obj]}
if util.is_set(obj):
if not self.unpicklable:
return list_recurse
return lambda obj: {tags.SET: [self._flatten(v) for v in obj]}
if util.is_dictionary(obj):
return self._flatten_dict_obj
if util.is_type(obj):
return _mktyperef
if util.is_object(obj):
return self._ref_obj_instance
if util.is_module_function(obj):
return self._flatten_function
# instance methods, lambdas, old style classes...
self._pickle_warning(obj)
return None
def _ref_obj_instance(self, obj):
"""Reference an existing object or flatten if new"""
if self.unpicklable:
if self._mkref(obj):
# We've never seen this object so return its
# json representation.
return self._flatten_obj_instance(obj)
# We've seen this object before so place an object
# reference tag in the data. This avoids infinite recursion
# when processing cyclical objects.
return self._getref(obj)
else:
max_reached = self._max_reached()
in_cycle = _in_cycle(obj, self._objs, max_reached, False)
if in_cycle:
# A circular reference becomes None.
return None
self._mkref(obj)
return self._flatten_obj_instance(obj)
def _flatten_file(self, obj):
"""
Special case file objects
"""
assert not PY3 and isinstance(obj, types.FileType)
return None
def _flatten_bytestring(self, obj):
if PY2:
try:
return obj.decode('utf-8')
except UnicodeDecodeError:
pass
return {self._bytes_tag: self._bytes_encoder(obj)}
def _flatten_obj_instance(self, obj):
"""Recursively flatten an instance and return a json-friendly dict"""
data = {}
has_class = hasattr(obj, '__class__')
has_dict = hasattr(obj, '__dict__')
has_slots = not has_dict and hasattr(obj, '__slots__')
has_getnewargs = util.has_method(obj, '__getnewargs__')
has_getnewargs_ex = util.has_method(obj, '__getnewargs_ex__')
has_getinitargs = util.has_method(obj, '__getinitargs__')
has_reduce, has_reduce_ex = util.has_reduce(obj)
# Support objects with __getstate__(); this ensures that
# both __setstate__() and __getstate__() are implemented
has_getstate = hasattr(obj, '__getstate__')
# not using has_method since __getstate__() is handled separately below
if has_class:
cls = obj.__class__
else:
cls = type(obj)
# Check for a custom handler
class_name = util.importable_name(cls)
handler = handlers.get(cls, handlers.get(class_name))
if handler is not None:
if self.unpicklable:
data[tags.OBJECT] = class_name
return handler(self).flatten(obj, data)
reduce_val = None
if self.unpicklable:
if has_reduce and not has_reduce_ex:
try:
reduce_val = obj.__reduce__()
except TypeError:
# A lot of builtin types have a reduce which
# just raises a TypeError
# we ignore those
pass
# test for a reduce implementation, and redirect before
# doing anything else if that is what reduce requests
elif has_reduce_ex:
try:
# we're implementing protocol 2
reduce_val = obj.__reduce_ex__(2)
except TypeError:
# A lot of builtin types have a reduce which
# just raises a TypeError
# we ignore those
pass
if reduce_val and isinstance(reduce_val, string_types):
try:
varpath = iter(reduce_val.split('.'))
# curmod will be transformed by the
# loop into the value to pickle
curmod = sys.modules[next(varpath)]
for modname in varpath:
curmod = getattr(curmod, modname)
# replace obj with value retrieved
return self._flatten(curmod)
except KeyError:
# well, we can't do anything with that, so we ignore it
pass
elif reduce_val:
# at this point, reduce_val should be some kind of iterable
# pad out to len 5
rv_as_list = list(reduce_val)
insufficiency = 5 - len(rv_as_list)
if insufficiency:
rv_as_list += [None] * insufficiency
if getattr(rv_as_list[0], '__name__', '') == '__newobj__':
rv_as_list[0] = tags.NEWOBJ
f, args, state, listitems, dictitems = rv_as_list
# check that getstate/setstate is sane
if not (
state
and hasattr(obj, '__getstate__')
and not hasattr(obj, '__setstate__')
and not isinstance(obj, dict)
):
# turn iterators to iterables for convenient serialization
if rv_as_list[3]:
rv_as_list[3] = tuple(rv_as_list[3])
if rv_as_list[4]:
rv_as_list[4] = tuple(rv_as_list[4])
reduce_args = list(map(self._flatten, rv_as_list))
last_index = len(reduce_args) - 1
while last_index >= 2 and reduce_args[last_index] is None:
last_index -= 1
data[tags.REDUCE] = reduce_args[: last_index + 1]
return data
if has_class and not util.is_module(obj):
if self.unpicklable:
data[tags.OBJECT] = class_name
if has_getnewargs_ex:
data[tags.NEWARGSEX] = list(map(self._flatten, obj.__getnewargs_ex__()))
if has_getnewargs and not has_getnewargs_ex:
data[tags.NEWARGS] = self._flatten(obj.__getnewargs__())
if has_getinitargs:
data[tags.INITARGS] = self._flatten(obj.__getinitargs__())
if has_getstate:
try:
state = obj.__getstate__()
except TypeError:
# Has getstate but it cannot be called, e.g. file descriptors
# in Python3
self._pickle_warning(obj)
return None
else:
return self._getstate(state, data)
if util.is_module(obj):
if self.unpicklable:
data[tags.REPR] = '{name}/{name}'.format(name=obj.__name__)
else:
data = compat.ustr(obj)
return data
if util.is_dictionary_subclass(obj):
self._flatten_dict_obj(obj, data)
return data
if util.is_sequence_subclass(obj):
return self._flatten_sequence_obj(obj, data)
if util.is_iterator(obj):
# force list in python 3
data[tags.ITERATOR] = list(map(self._flatten, islice(obj, self._max_iter)))
return data
if has_dict:
# Support objects that subclass list and set
if util.is_sequence_subclass(obj):
return self._flatten_sequence_obj(obj, data)
# hack for zope persistent objects; this unghostifies the object
getattr(obj, '_', None)
return self._flatten_dict_obj(obj.__dict__, data)
if has_slots:
return self._flatten_newstyle_with_slots(obj, data)
# catchall return for data created above without a return
# (e.g. __getnewargs__ is not supposed to be the end of the story)
if data:
return data
self._pickle_warning(obj)
return None
def _flatten_function(self, obj):
if self.unpicklable:
data = {tags.FUNCTION: util.importable_name(obj)}
else:
data = None
return data
def _flatten_dict_obj(self, obj, data=None):
"""Recursively call flatten() and return json-friendly dict"""
if data is None:
data = obj.__class__()
# If we allow non-string keys then we have to do a two-phase
# encoding to ensure that the reference IDs are deterministic.
if self.keys:
# Phase 1: serialize regular objects, ignore fancy keys.
flatten = self._flatten_string_key_value_pair
for k, v in util.items(obj):
flatten(k, v, data)
# Phase 2: serialize non-string keys.
flatten = self._flatten_non_string_key_value_pair
for k, v in util.items(obj):
flatten(k, v, data)
else:
# If we have string keys only then we only need a single pass.
flatten = self._flatten_key_value_pair
for k, v in util.items(obj):
flatten(k, v, data)
# the collections.defaultdict protocol
if hasattr(obj, 'default_factory') and callable(obj.default_factory):
factory = obj.default_factory
if util.is_type(factory):
# Reference the class/type
value = _mktyperef(factory)
else:
# The factory is not a type and could reference e.g. functions
# or even the object instance itself, which creates a cycle.
if self._mkref(factory):
# We've never seen this object before so pickle it in-place.
# Create an instance from the factory and assume that the
# resulting instance is a suitable exemplar.
value = self._flatten_obj_instance(handlers.CloneFactory(factory()))
else:
# We've seen this object before.
# Break the cycle by emitting a reference.
value = self._getref(factory)
data['default_factory'] = value
# Sub-classes of dict
if hasattr(obj, '__dict__') and self.unpicklable:
dict_data = {}
self._flatten_dict_obj(obj.__dict__, dict_data)
data['__dict__'] = dict_data
return data
def _flatten_obj_attrs(self, obj, attrs, data):
flatten = self._flatten_key_value_pair
ok = False
for k in attrs:
try:
value = getattr(obj, k)
flatten(k, value, data)
except AttributeError:
# The attribute may have been deleted
continue
ok = True
return ok
def _flatten_newstyle_with_slots(self, obj, data):
"""Return a json-friendly dict for new-style objects with __slots__."""
allslots = [
_wrap_string_slot(getattr(cls, '__slots__', tuple()))
for cls in obj.__class__.mro()
]
if not self._flatten_obj_attrs(obj, chain(*allslots), data):
attrs = [
x for x in dir(obj) if not x.startswith('__') and not x.endswith('__')
]
self._flatten_obj_attrs(obj, attrs, data)
return data
def _flatten_key_value_pair(self, k, v, data):
"""Flatten a key/value pair into the passed-in dictionary."""
if not util.is_picklable(k, v):
return data
if k is None:
k = 'null' # for compatibility with common json encoders
if self.numeric_keys and isinstance(k, numeric_types):
pass
elif not isinstance(k, string_types):
try:
k = repr(k)
except Exception:
k = compat.ustr(k)
data[k] = self._flatten(v)
return data
def _flatten_non_string_key_value_pair(self, k, v, data):
"""Flatten only non-string key/value pairs"""
if not util.is_picklable(k, v):
return data
if self.keys and not isinstance(k, string_types):
k = self._escape_key(k)
data[k] = self._flatten(v)
return data
def _flatten_string_key_value_pair(self, k, v, data):
"""Flatten string key/value pairs only."""
if not util.is_picklable(k, v):
return data
if self.keys:
if not isinstance(k, string_types):
return data
elif k.startswith(tags.JSON_KEY):
k = self._escape_key(k)
else:
if k is None:
k = 'null' # for compatibility with common json encoders
if self.numeric_keys and isinstance(k, numeric_types):
pass
elif not isinstance(k, string_types):
try:
k = repr(k)
except Exception:
k = compat.ustr(k)
data[k] = self._flatten(v)
return data
def _flatten_sequence_obj(self, obj, data):
"""Return a json-friendly dict for a sequence subclass."""
if hasattr(obj, '__dict__'):
self._flatten_dict_obj(obj.__dict__, data)
value = [self._flatten(v) for v in obj]
if self.unpicklable:
data[tags.SEQ] = value
else:
return value
return data
def _escape_key(self, k):
return tags.JSON_KEY + encode(
k,
reset=False,
keys=True,
context=self,
backend=self.backend,
make_refs=self.make_refs,
)
def _getstate(self, obj, data):
state = self._flatten(obj)
if self.unpicklable:
data[tags.STATE] = state
else:
data = state
return data
def _pickle_warning(self, obj):
if self.warn:
msg = 'jsonpickle cannot pickle %r: replaced with None' % obj
warnings.warn(msg)
def _in_cycle(obj, objs, max_reached, make_refs):
"""Detect cyclic structures that would lead to infinite recursion"""
return (
(max_reached or (not make_refs and id(obj) in objs))
and not util.is_primitive(obj)
and not util.is_enum(obj)
)
def _mktyperef(obj):
"""Return a typeref dictionary
>>> _mktyperef(AssertionError) == {'py/type': 'builtins.AssertionError'}
True
"""
return {tags.TYPE: util.importable_name(obj)}
def _wrap_string_slot(string):
"""Converts __slots__ = 'a' into __slots__ = ('a',)"""
if isinstance(string, string_types):
return (string,)
return string
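# --- Illustrative sketch: __slots__ handling --------------------------------
# A small, hedged example of the path through _flatten_newstyle_with_slots()
# and _wrap_string_slot(): a bare string __slots__ is normalised to a tuple,
# and the slot attributes end up as plain keys in the flattened dict.
# '_Boxed' is a hypothetical class and the expected output is approximate.
import jsonpickle

class _Boxed(object):
    __slots__ = 'value'                  # a bare string, as _wrap_string_slot handles
    def __init__(self, value):
        self.value = value

def _slots_demo():
    flat = jsonpickle.encode(_Boxed(42))
    # roughly: '{"py/object": "..._Boxed", "value": 42}'
    return flat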
|
def _log_ref(self, obj):
"""
Log a reference to an in-memory object.
Return True if this object is new and was assigned
a new ID. Otherwise return False.
"""
objid = id(obj)
is_new = objid not in self._objs
if is_new:
new_id = len(self._objs)
self._objs[objid] = new_id
return is_new
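# --- Illustrative sketch of the reference bookkeeping ------------------------
# A hedged example of what _log_ref()/_mkref() provide: the first time an
# instance is seen it is flattened in full and assigned a new ID, and later
# occurrences are emitted as {"py/id": <n>} references. Exact ID values
# depend on traversal order, so treat the expected output as approximate.
# '_SharedThing' is a hypothetical class used only for illustration.
import jsonpickle

class _SharedThing(object):
    def __init__(self, name):
        self.name = name

def _reference_demo():
    thing = _SharedThing('shared')
    with_refs = jsonpickle.encode([thing, thing])
    # roughly: '[{"py/object": "..._SharedThing", "name": "shared"}, {"py/id": 1}]'
    without_refs = jsonpickle.encode([thing, thing], make_refs=False)
    # with make_refs disabled, both elements are flattened in full
    return with_refs, without_refs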
| 196 | 207 |
# Copyright (C) 2008 John Paulett (john -at- paulett.org)
# Copyright (C) 2009-2018 David Aguilar (davvid -at- gmail.com)
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
from __future__ import absolute_import, division, unicode_literals
import decimal
import warnings
import sys
import types
from itertools import chain, islice
from . import compat
from . import util
from . import tags
from . import handlers
from .backend import json
from .compat import numeric_types, string_types, PY3, PY2
def encode(
value,
unpicklable=True,
make_refs=True,
keys=False,
max_depth=None,
reset=True,
backend=None,
warn=False,
context=None,
max_iter=None,
use_decimal=False,
numeric_keys=False,
use_base85=False,
fail_safe=None,
indent=None,
separators=None,
):
"""Return a JSON formatted representation of value, a Python object.
:param unpicklable: If set to False then the output will not contain the
information necessary to turn the JSON data back into Python objects,
but a simpler JSON stream is produced.
:param max_depth: If set to a non-negative integer then jsonpickle will
not recurse deeper than 'max_depth' steps into the object. Anything
deeper than 'max_depth' is represented using a Python repr() of the
object.
:param make_refs: If set to False jsonpickle's referencing support is
disabled. Objects that are id()-identical won't be preserved across
encode()/decode(), but the resulting JSON stream will be conceptually
simpler. jsonpickle detects cyclical objects and will break the cycle
by calling repr() instead of recursing when make_refs is set False.
:param keys: If set to True then jsonpickle will encode non-string
dictionary keys instead of coercing them into strings via `repr()`.
This is typically what you want if you need to support integers or
other non-string objects as dictionary keys.
:param numeric_keys: Only use this option if the backend supports integer
dict keys natively. This flag tells jsonpickle to leave numeric keys
as-is rather than conforming them to json-friendly strings.
Using ``keys=True`` is the typical solution for integer keys, so only
use this if you have a specific use case where you want to allow the
backend to handle serialization of numeric dict keys.
:param warn: If set to True then jsonpickle will warn when it
returns None for an object which it cannot pickle
(e.g. file descriptors).
:param max_iter: If set to a non-negative integer then jsonpickle will
consume at most `max_iter` items when pickling iterators.
:param use_decimal: If set to True jsonpickle will allow Decimal
instances to pass-through, with the assumption that the simplejson
backend will be used in `use_decimal` mode. In order to use this mode
you will need to configure simplejson::
jsonpickle.set_encoder_options('simplejson',
use_decimal=True, sort_keys=True)
jsonpickle.set_decoder_options('simplejson',
use_decimal=True)
jsonpickle.set_preferred_backend('simplejson')
NOTE: A side-effect of the above settings is that float values will be
converted to Decimal when converting to json.
:param use_base85:
If possible, use base85 to encode binary data. Base85 bloats binary data
by 1/4 as opposed to base64, which expands it by 1/3. This argument is
ignored on Python 2 because it doesn't support it.
:param fail_safe: If set to a function, exceptions are ignored when pickling,
and if an exception happens the function is called and its return value
is used as the value for the object that caused the error.
:param indent: When `indent` is a non-negative integer, then JSON array
elements and object members will be pretty-printed with that indent
level. An indent level of 0 will only insert newlines. ``None`` is
the most compact representation. Since the default item separator is
``(', ', ': ')``, the output might include trailing whitespace when
``indent`` is specified. You can use ``separators=(',', ': ')`` to
avoid this. This value is passed directly to the active JSON backend
library and not used by jsonpickle directly.
:param separators:
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')``
separators. ``(',', ':')`` is the most compact JSON representation.
This value is passed directly to the active JSON backend library and
not used by jsonpickle directly.
>>> encode('my string') == '"my string"'
True
>>> encode(36) == '36'
True
>>> encode({'foo': True}) == '{"foo": true}'
True
>>> encode({'foo': [1, 2, [3, 4]]}, max_depth=1)
'{"foo": "[1, 2, [3, 4]]"}'
"""
backend = backend or json
context = context or Pickler(
unpicklable=unpicklable,
make_refs=make_refs,
keys=keys,
backend=backend,
max_depth=max_depth,
warn=warn,
max_iter=max_iter,
numeric_keys=numeric_keys,
use_decimal=use_decimal,
use_base85=use_base85,
fail_safe=fail_safe,
)
return backend.encode(
context.flatten(value, reset=reset), indent=indent, separators=separators
)
class Pickler(object):
def __init__(
self,
unpicklable=True,
make_refs=True,
max_depth=None,
backend=None,
keys=False,
warn=False,
max_iter=None,
numeric_keys=False,
use_decimal=False,
use_base85=False,
fail_safe=None,
):
self.unpicklable = unpicklable
self.make_refs = make_refs
self.backend = backend or json
self.keys = keys
self.warn = warn
self.numeric_keys = numeric_keys
self.use_base85 = use_base85 and (not PY2)
# The current recursion depth
self._depth = -1
# The maximal recursion depth
self._max_depth = max_depth
# Maps id(obj) to reference IDs
self._objs = {}
# Avoids garbage collection
self._seen = []
# maximum number of items to take from a pickled iterator
self._max_iter = max_iter
# Whether to allow decimals to pass-through
self._use_decimal = use_decimal
if self.use_base85:
self._bytes_tag = tags.B85
self._bytes_encoder = util.b85encode
else:
self._bytes_tag = tags.B64
self._bytes_encoder = util.b64encode
# ignore exceptions
self.fail_safe = fail_safe
def reset(self):
self._objs = {}
self._depth = -1
self._seen = []
def _push(self):
"""Steps down one level in the namespace."""
self._depth += 1
def _pop(self, value):
"""Step up one level in the namespace and return the value.
If we're at the root, reset the pickler's state.
"""
self._depth -= 1
if self._depth == -1:
self.reset()
return value
def _log_ref(self, obj):
"""
Log a reference to an in-memory object.
Return True if this object is new and was assigned
a new ID. Otherwise return False.
"""
objid = id(obj)
is_new = objid not in self._objs
if is_new:
new_id = len(self._objs)
self._objs[objid] = new_id
return is_new
def _mkref(self, obj):
"""
Log a reference to an in-memory object, and return
if that object should be considered newly logged.
"""
is_new = self._log_ref(obj)
# Pretend the object is new
pretend_new = not self.unpicklable or not self.make_refs
return pretend_new or is_new
def _getref(self, obj):
return {tags.ID: self._objs.get(id(obj))}
def flatten(self, obj, reset=True):
"""Takes an object and returns a JSON-safe representation of it.
Simply returns any of the basic builtin datatypes
>>> p = Pickler()
>>> p.flatten('hello world') == 'hello world'
True
>>> p.flatten(49)
49
>>> p.flatten(350.0)
350.0
>>> p.flatten(True)
True
>>> p.flatten(False)
False
>>> r = p.flatten(None)
>>> r is None
True
>>> p.flatten(False)
False
>>> p.flatten([1, 2, 3, 4])
[1, 2, 3, 4]
>>> p.flatten((1,2,))[tags.TUPLE]
[1, 2]
>>> p.flatten({'key': 'value'}) == {'key': 'value'}
True
"""
if reset:
self.reset()
return self._flatten(obj)
def _flatten(self, obj):
#########################################
# if obj is nonrecursive return immediately
# for performance reasons we don't want to do recursive checks
if PY2 and isinstance(obj, types.FileType):
return self._flatten_file(obj)
if util.is_bytes(obj):
return self._flatten_bytestring(obj)
if util.is_primitive(obj):
return obj
# Decimal is a primitive when use_decimal is True
if self._use_decimal and isinstance(obj, decimal.Decimal):
return obj
#########################################
self._push()
return self._pop(self._flatten_obj(obj))
def _max_reached(self):
return self._depth == self._max_depth
def _flatten_obj(self, obj):
self._seen.append(obj)
max_reached = self._max_reached()
try:
in_cycle = _in_cycle(obj, self._objs, max_reached, self.make_refs)
if in_cycle:
# break the cycle
flatten_func = repr
else:
flatten_func = self._get_flattener(obj)
if flatten_func is None:
self._pickle_warning(obj)
return None
return flatten_func(obj)
except (KeyboardInterrupt, SystemExit) as e:
raise e
except Exception as e:
if self.fail_safe is None:
raise e
else:
return self.fail_safe(e)
def _list_recurse(self, obj):
return [self._flatten(v) for v in obj]
def _get_flattener(self, obj):
list_recurse = self._list_recurse
if util.is_list(obj):
if self._mkref(obj):
return list_recurse
else:
self._push()
return self._getref
# We handle tuples and sets by encoding them in a "(tuple|set)dict"
if util.is_tuple(obj):
if not self.unpicklable:
return list_recurse
return lambda obj: {tags.TUPLE: [self._flatten(v) for v in obj]}
if util.is_set(obj):
if not self.unpicklable:
return list_recurse
return lambda obj: {tags.SET: [self._flatten(v) for v in obj]}
if util.is_dictionary(obj):
return self._flatten_dict_obj
if util.is_type(obj):
return _mktyperef
if util.is_object(obj):
return self._ref_obj_instance
if util.is_module_function(obj):
return self._flatten_function
# instance methods, lambdas, old style classes...
self._pickle_warning(obj)
return None
def _ref_obj_instance(self, obj):
"""Reference an existing object or flatten if new"""
if self.unpicklable:
if self._mkref(obj):
# We've never seen this object so return its
# json representation.
return self._flatten_obj_instance(obj)
# We've seen this object before so place an object
# reference tag in the data. This avoids infinite recursion
# when processing cyclical objects.
return self._getref(obj)
else:
max_reached = self._max_reached()
in_cycle = _in_cycle(obj, self._objs, max_reached, False)
if in_cycle:
# A circular reference becomes None.
return None
self._mkref(obj)
return self._flatten_obj_instance(obj)
def _flatten_file(self, obj):
"""
Special case file objects
"""
assert not PY3 and isinstance(obj, types.FileType)
return None
def _flatten_bytestring(self, obj):
if PY2:
try:
return obj.decode('utf-8')
except UnicodeDecodeError:
pass
return {self._bytes_tag: self._bytes_encoder(obj)}
def _flatten_obj_instance(self, obj):
"""Recursively flatten an instance and return a json-friendly dict"""
data = {}
has_class = hasattr(obj, '__class__')
has_dict = hasattr(obj, '__dict__')
has_slots = not has_dict and hasattr(obj, '__slots__')
has_getnewargs = util.has_method(obj, '__getnewargs__')
has_getnewargs_ex = util.has_method(obj, '__getnewargs_ex__')
has_getinitargs = util.has_method(obj, '__getinitargs__')
has_reduce, has_reduce_ex = util.has_reduce(obj)
# Support objects with __getstate__(); this ensures that
# both __setstate__() and __getstate__() are implemented
has_getstate = hasattr(obj, '__getstate__')
# not using has_method since __getstate__() is handled separately below
if has_class:
cls = obj.__class__
else:
cls = type(obj)
# Check for a custom handler
class_name = util.importable_name(cls)
handler = handlers.get(cls, handlers.get(class_name))
if handler is not None:
if self.unpicklable:
data[tags.OBJECT] = class_name
return handler(self).flatten(obj, data)
reduce_val = None
if self.unpicklable:
if has_reduce and not has_reduce_ex:
try:
reduce_val = obj.__reduce__()
except TypeError:
# A lot of builtin types have a reduce which
# just raises a TypeError
# we ignore those
pass
# test for a reduce implementation, and redirect before
# doing anything else if that is what reduce requests
elif has_reduce_ex:
try:
# we're implementing protocol 2
reduce_val = obj.__reduce_ex__(2)
except TypeError:
# A lot of builtin types have a reduce which
# just raises a TypeError
# we ignore those
pass
if reduce_val and isinstance(reduce_val, string_types):
try:
varpath = iter(reduce_val.split('.'))
# curmod will be transformed by the
# loop into the value to pickle
curmod = sys.modules[next(varpath)]
for modname in varpath:
curmod = getattr(curmod, modname)
# replace obj with value retrieved
return self._flatten(curmod)
except KeyError:
# well, we can't do anything with that, so we ignore it
pass
elif reduce_val:
# at this point, reduce_val should be some kind of iterable
# pad out to len 5
rv_as_list = list(reduce_val)
insufficiency = 5 - len(rv_as_list)
if insufficiency:
rv_as_list += [None] * insufficiency
if getattr(rv_as_list[0], '__name__', '') == '__newobj__':
rv_as_list[0] = tags.NEWOBJ
f, args, state, listitems, dictitems = rv_as_list
# check that getstate/setstate is sane
if not (
state
and hasattr(obj, '__getstate__')
and not hasattr(obj, '__setstate__')
and not isinstance(obj, dict)
):
# turn iterators to iterables for convenient serialization
if rv_as_list[3]:
rv_as_list[3] = tuple(rv_as_list[3])
if rv_as_list[4]:
rv_as_list[4] = tuple(rv_as_list[4])
reduce_args = list(map(self._flatten, rv_as_list))
last_index = len(reduce_args) - 1
while last_index >= 2 and reduce_args[last_index] is None:
last_index -= 1
data[tags.REDUCE] = reduce_args[: last_index + 1]
return data
if has_class and not util.is_module(obj):
if self.unpicklable:
data[tags.OBJECT] = class_name
if has_getnewargs_ex:
data[tags.NEWARGSEX] = list(map(self._flatten, obj.__getnewargs_ex__()))
if has_getnewargs and not has_getnewargs_ex:
data[tags.NEWARGS] = self._flatten(obj.__getnewargs__())
if has_getinitargs:
data[tags.INITARGS] = self._flatten(obj.__getinitargs__())
if has_getstate:
try:
state = obj.__getstate__()
except TypeError:
# Has getstate but it cannot be called, e.g. file descriptors
# in Python3
self._pickle_warning(obj)
return None
else:
return self._getstate(state, data)
if util.is_module(obj):
if self.unpicklable:
data[tags.REPR] = '{name}/{name}'.format(name=obj.__name__)
else:
data = compat.ustr(obj)
return data
if util.is_dictionary_subclass(obj):
self._flatten_dict_obj(obj, data)
return data
if util.is_sequence_subclass(obj):
return self._flatten_sequence_obj(obj, data)
if util.is_iterator(obj):
# force list in python 3
data[tags.ITERATOR] = list(map(self._flatten, islice(obj, self._max_iter)))
return data
if has_dict:
            # Support objects that subclass list and set
if util.is_sequence_subclass(obj):
return self._flatten_sequence_obj(obj, data)
# hack for zope persistent objects; this unghostifies the object
getattr(obj, '_', None)
return self._flatten_dict_obj(obj.__dict__, data)
if has_slots:
return self._flatten_newstyle_with_slots(obj, data)
# catchall return for data created above without a return
# (e.g. __getnewargs__ is not supposed to be the end of the story)
if data:
return data
self._pickle_warning(obj)
return None
def _flatten_function(self, obj):
if self.unpicklable:
data = {tags.FUNCTION: util.importable_name(obj)}
else:
data = None
return data
def _flatten_dict_obj(self, obj, data=None):
"""Recursively call flatten() and return json-friendly dict"""
if data is None:
data = obj.__class__()
# If we allow non-string keys then we have to do a two-phase
# encoding to ensure that the reference IDs are deterministic.
if self.keys:
# Phase 1: serialize regular objects, ignore fancy keys.
flatten = self._flatten_string_key_value_pair
for k, v in util.items(obj):
flatten(k, v, data)
# Phase 2: serialize non-string keys.
flatten = self._flatten_non_string_key_value_pair
for k, v in util.items(obj):
flatten(k, v, data)
else:
# If we have string keys only then we only need a single pass.
flatten = self._flatten_key_value_pair
for k, v in util.items(obj):
flatten(k, v, data)
# the collections.defaultdict protocol
if hasattr(obj, 'default_factory') and callable(obj.default_factory):
factory = obj.default_factory
if util.is_type(factory):
# Reference the class/type
value = _mktyperef(factory)
else:
# The factory is not a type and could reference e.g. functions
# or even the object instance itself, which creates a cycle.
if self._mkref(factory):
# We've never seen this object before so pickle it in-place.
# Create an instance from the factory and assume that the
                    # resulting instance is a suitable exemplar.
value = self._flatten_obj_instance(handlers.CloneFactory(factory()))
else:
# We've seen this object before.
# Break the cycle by emitting a reference.
value = self._getref(factory)
data['default_factory'] = value
# Sub-classes of dict
if hasattr(obj, '__dict__') and self.unpicklable:
dict_data = {}
self._flatten_dict_obj(obj.__dict__, dict_data)
data['__dict__'] = dict_data
return data
def _flatten_obj_attrs(self, obj, attrs, data):
flatten = self._flatten_key_value_pair
ok = False
for k in attrs:
try:
value = getattr(obj, k)
flatten(k, value, data)
except AttributeError:
# The attribute may have been deleted
continue
ok = True
return ok
def _flatten_newstyle_with_slots(self, obj, data):
"""Return a json-friendly dict for new-style objects with __slots__."""
allslots = [
_wrap_string_slot(getattr(cls, '__slots__', tuple()))
for cls in obj.__class__.mro()
]
if not self._flatten_obj_attrs(obj, chain(*allslots), data):
attrs = [
x for x in dir(obj) if not x.startswith('__') and not x.endswith('__')
]
self._flatten_obj_attrs(obj, attrs, data)
return data
def _flatten_key_value_pair(self, k, v, data):
"""Flatten a key/value pair into the passed-in dictionary."""
if not util.is_picklable(k, v):
return data
if k is None:
k = 'null' # for compatibility with common json encoders
if self.numeric_keys and isinstance(k, numeric_types):
pass
elif not isinstance(k, string_types):
try:
k = repr(k)
except Exception:
k = compat.ustr(k)
data[k] = self._flatten(v)
return data
def _flatten_non_string_key_value_pair(self, k, v, data):
"""Flatten only non-string key/value pairs"""
if not util.is_picklable(k, v):
return data
if self.keys and not isinstance(k, string_types):
k = self._escape_key(k)
data[k] = self._flatten(v)
return data
def _flatten_string_key_value_pair(self, k, v, data):
"""Flatten string key/value pairs only."""
if not util.is_picklable(k, v):
return data
if self.keys:
if not isinstance(k, string_types):
return data
elif k.startswith(tags.JSON_KEY):
k = self._escape_key(k)
else:
if k is None:
k = 'null' # for compatibility with common json encoders
if self.numeric_keys and isinstance(k, numeric_types):
pass
elif not isinstance(k, string_types):
try:
k = repr(k)
except Exception:
k = compat.ustr(k)
data[k] = self._flatten(v)
return data
def _flatten_sequence_obj(self, obj, data):
"""Return a json-friendly dict for a sequence subclass."""
if hasattr(obj, '__dict__'):
self._flatten_dict_obj(obj.__dict__, data)
value = [self._flatten(v) for v in obj]
if self.unpicklable:
data[tags.SEQ] = value
else:
return value
return data
def _escape_key(self, k):
return tags.JSON_KEY + encode(
k,
reset=False,
keys=True,
context=self,
backend=self.backend,
make_refs=self.make_refs,
)
def _getstate(self, obj, data):
state = self._flatten(obj)
if self.unpicklable:
data[tags.STATE] = state
else:
data = state
return data
def _pickle_warning(self, obj):
if self.warn:
msg = 'jsonpickle cannot pickle %r: replaced with None' % obj
warnings.warn(msg)
def _in_cycle(obj, objs, max_reached, make_refs):
"""Detect cyclic structures that would lead to infinite recursion"""
return (
(max_reached or (not make_refs and id(obj) in objs))
and not util.is_primitive(obj)
and not util.is_enum(obj)
)
def _mktyperef(obj):
"""Return a typeref dictionary
>>> _mktyperef(AssertionError) == {'py/type': 'builtins.AssertionError'}
True
"""
return {tags.TYPE: util.importable_name(obj)}
def _wrap_string_slot(string):
"""Converts __slots__ = 'a' into __slots__ = ('a',)"""
if isinstance(string, string_types):
return (string,)
return string
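# --- Illustrative sketch (not part of the original module) ---
# A minimal demonstration of the cycle handling implemented by _in_cycle():
# with make_refs disabled, a self-referential structure is broken by falling
# back to repr() instead of recursing forever. This assumes only the public
# jsonpickle.encode() API documented in this module.
def _demo_cycle_handling():
    import jsonpickle

    lst = [1, 2, 3]
    lst.append(lst)  # create a cycle: the list contains itself
    with_refs = jsonpickle.encode(lst)  # cycle encoded as a py/id reference
    without_refs = jsonpickle.encode(lst, make_refs=False)  # inner occurrence becomes repr(lst)
    return with_refs, without_refs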
|
flatten
|
Takes an object and returns a JSON-safe representation of it.
Simply returns any of the basic builtin datatypes
>>> p = Pickler()
>>> p.flatten('hello world') == 'hello world'
True
>>> p.flatten(49)
49
>>> p.flatten(350.0)
350.0
>>> p.flatten(True)
True
>>> p.flatten(False)
False
>>> r = p.flatten(None)
>>> r is None
True
>>> p.flatten(False)
False
>>> p.flatten([1, 2, 3, 4])
[1, 2, 3, 4]
>>> p.flatten((1,2,))[tags.TUPLE]
[1, 2]
>>> p.flatten({'key': 'value'}) == {'key': 'value'}
True
|
# Copyright (C) 2008 John Paulett (john -at- paulett.org)
# Copyright (C) 2009-2018 David Aguilar (davvid -at- gmail.com)
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
from __future__ import absolute_import, division, unicode_literals
import decimal
import warnings
import sys
import types
from itertools import chain, islice
from . import compat
from . import util
from . import tags
from . import handlers
from .backend import json
from .compat import numeric_types, string_types, PY3, PY2
def encode(
value,
unpicklable=True,
make_refs=True,
keys=False,
max_depth=None,
reset=True,
backend=None,
warn=False,
context=None,
max_iter=None,
use_decimal=False,
numeric_keys=False,
use_base85=False,
fail_safe=None,
indent=None,
separators=None,
):
"""Return a JSON formatted representation of value, a Python object.
:param unpicklable: If set to False then the output will not contain the
information necessary to turn the JSON data back into Python objects,
but a simpler JSON stream is produced.
:param max_depth: If set to a non-negative integer then jsonpickle will
not recurse deeper than 'max_depth' steps into the object. Anything
deeper than 'max_depth' is represented using a Python repr() of the
object.
:param make_refs: If set to False jsonpickle's referencing support is
disabled. Objects that are id()-identical won't be preserved across
encode()/decode(), but the resulting JSON stream will be conceptually
simpler. jsonpickle detects cyclical objects and will break the cycle
by calling repr() instead of recursing when make_refs is set False.
:param keys: If set to True then jsonpickle will encode non-string
dictionary keys instead of coercing them into strings via `repr()`.
        This is typically what you want if you need to support integers or
        arbitrary objects as dictionary keys.
:param numeric_keys: Only use this option if the backend supports integer
dict keys natively. This flag tells jsonpickle to leave numeric keys
as-is rather than conforming them to json-friendly strings.
Using ``keys=True`` is the typical solution for integer keys, so only
use this if you have a specific use case where you want to allow the
backend to handle serialization of numeric dict keys.
:param warn: If set to True then jsonpickle will warn when it
returns None for an object which it cannot pickle
(e.g. file descriptors).
:param max_iter: If set to a non-negative integer then jsonpickle will
consume at most `max_iter` items when pickling iterators.
:param use_decimal: If set to True jsonpickle will allow Decimal
instances to pass-through, with the assumption that the simplejson
backend will be used in `use_decimal` mode. In order to use this mode
you will need to configure simplejson::
jsonpickle.set_encoder_options('simplejson',
use_decimal=True, sort_keys=True)
jsonpickle.set_decoder_options('simplejson',
use_decimal=True)
jsonpickle.set_preferred_backend('simplejson')
NOTE: A side-effect of the above settings is that float values will be
converted to Decimal when converting to json.
:param use_base85:
        If possible, use base85 to encode binary data. Base85 inflates binary
        data by about 1/4, whereas base64 expands it by about 1/3. This argument
        is ignored on Python 2, which does not support base85.
    :param fail_safe: If set to a function, exceptions are ignored while
        pickling; when an exception occurs, the function is called with the
        exception and its return value is used in place of the object that
        caused the error.
:param indent: When `indent` is a non-negative integer, then JSON array
elements and object members will be pretty-printed with that indent
level. An indent level of 0 will only insert newlines. ``None`` is
the most compact representation. Since the default item separator is
``(', ', ': ')``, the output might include trailing whitespace when
``indent`` is specified. You can use ``separators=(',', ': ')`` to
avoid this. This value is passed directly to the active JSON backend
library and not used by jsonpickle directly.
:param separators:
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')``
separators. ``(',', ':')`` is the most compact JSON representation.
This value is passed directly to the active JSON backend library and
not used by jsonpickle directly.
>>> encode('my string') == '"my string"'
True
>>> encode(36) == '36'
True
>>> encode({'foo': True}) == '{"foo": true}'
True
>>> encode({'foo': [1, 2, [3, 4]]}, max_depth=1)
'{"foo": "[1, 2, [3, 4]]"}'
"""
backend = backend or json
context = context or Pickler(
unpicklable=unpicklable,
make_refs=make_refs,
keys=keys,
backend=backend,
max_depth=max_depth,
warn=warn,
max_iter=max_iter,
numeric_keys=numeric_keys,
use_decimal=use_decimal,
use_base85=use_base85,
fail_safe=fail_safe,
)
return backend.encode(
context.flatten(value, reset=reset), indent=indent, separators=separators
)
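# --- Illustrative usage sketch (not part of the original module) ---
# Exercises only parameters documented in encode() above; the exact output
# strings are indicative rather than authoritative.
def _demo_encode_usage():
    # Nested containers beyond max_depth are replaced with their repr(),
    # mirroring the doctest above: '{"foo": "[1, 2, [3, 4]]"}'.
    shallow = encode({'foo': [1, 2, [3, 4]]}, max_depth=1)
    # With unpicklable=False a plain JSON document is produced, without the
    # py/ metadata needed to reconstruct the original Python objects
    # (tuples, for example, are emitted as plain lists).
    plain = encode({'point': (1, 2)}, unpicklable=False)
    return shallow, plain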
class Pickler(object):
def __init__(
self,
unpicklable=True,
make_refs=True,
max_depth=None,
backend=None,
keys=False,
warn=False,
max_iter=None,
numeric_keys=False,
use_decimal=False,
use_base85=False,
fail_safe=None,
):
self.unpicklable = unpicklable
self.make_refs = make_refs
self.backend = backend or json
self.keys = keys
self.warn = warn
self.numeric_keys = numeric_keys
self.use_base85 = use_base85 and (not PY2)
# The current recursion depth
self._depth = -1
# The maximal recursion depth
self._max_depth = max_depth
# Maps id(obj) to reference IDs
self._objs = {}
# Avoids garbage collection
self._seen = []
        # maximum number of items to take from a pickled iterator
self._max_iter = max_iter
# Whether to allow decimals to pass-through
self._use_decimal = use_decimal
if self.use_base85:
self._bytes_tag = tags.B85
self._bytes_encoder = util.b85encode
else:
self._bytes_tag = tags.B64
self._bytes_encoder = util.b64encode
# ignore exceptions
self.fail_safe = fail_safe
def reset(self):
self._objs = {}
self._depth = -1
self._seen = []
def _push(self):
"""Steps down one level in the namespace."""
self._depth += 1
def _pop(self, value):
"""Step up one level in the namespace and return the value.
If we're at the root, reset the pickler's state.
"""
self._depth -= 1
if self._depth == -1:
self.reset()
return value
def _log_ref(self, obj):
"""
Log a reference to an in-memory object.
Return True if this object is new and was assigned
a new ID. Otherwise return False.
"""
objid = id(obj)
is_new = objid not in self._objs
if is_new:
new_id = len(self._objs)
self._objs[objid] = new_id
return is_new
def _mkref(self, obj):
"""
Log a reference to an in-memory object, and return
if that object should be considered newly logged.
"""
is_new = self._log_ref(obj)
# Pretend the object is new
pretend_new = not self.unpicklable or not self.make_refs
return pretend_new or is_new
def _getref(self, obj):
return {tags.ID: self._objs.get(id(obj))}
# MASKED: flatten function (lines 222-252)
def _flatten(self, obj):
#########################################
# if obj is nonrecursive return immediately
# for performance reasons we don't want to do recursive checks
if PY2 and isinstance(obj, types.FileType):
return self._flatten_file(obj)
if util.is_bytes(obj):
return self._flatten_bytestring(obj)
if util.is_primitive(obj):
return obj
# Decimal is a primitive when use_decimal is True
if self._use_decimal and isinstance(obj, decimal.Decimal):
return obj
#########################################
self._push()
return self._pop(self._flatten_obj(obj))
def _max_reached(self):
return self._depth == self._max_depth
def _flatten_obj(self, obj):
self._seen.append(obj)
max_reached = self._max_reached()
try:
in_cycle = _in_cycle(obj, self._objs, max_reached, self.make_refs)
if in_cycle:
# break the cycle
flatten_func = repr
else:
flatten_func = self._get_flattener(obj)
if flatten_func is None:
self._pickle_warning(obj)
return None
return flatten_func(obj)
except (KeyboardInterrupt, SystemExit) as e:
raise e
except Exception as e:
if self.fail_safe is None:
raise e
else:
return self.fail_safe(e)
def _list_recurse(self, obj):
return [self._flatten(v) for v in obj]
def _get_flattener(self, obj):
list_recurse = self._list_recurse
if util.is_list(obj):
if self._mkref(obj):
return list_recurse
else:
self._push()
return self._getref
# We handle tuples and sets by encoding them in a "(tuple|set)dict"
if util.is_tuple(obj):
if not self.unpicklable:
return list_recurse
return lambda obj: {tags.TUPLE: [self._flatten(v) for v in obj]}
if util.is_set(obj):
if not self.unpicklable:
return list_recurse
return lambda obj: {tags.SET: [self._flatten(v) for v in obj]}
if util.is_dictionary(obj):
return self._flatten_dict_obj
if util.is_type(obj):
return _mktyperef
if util.is_object(obj):
return self._ref_obj_instance
if util.is_module_function(obj):
return self._flatten_function
# instance methods, lambdas, old style classes...
self._pickle_warning(obj)
return None
def _ref_obj_instance(self, obj):
"""Reference an existing object or flatten if new"""
if self.unpicklable:
if self._mkref(obj):
# We've never seen this object so return its
# json representation.
return self._flatten_obj_instance(obj)
# We've seen this object before so place an object
# reference tag in the data. This avoids infinite recursion
# when processing cyclical objects.
return self._getref(obj)
else:
max_reached = self._max_reached()
in_cycle = _in_cycle(obj, self._objs, max_reached, False)
if in_cycle:
                # A circular reference becomes None.
return None
self._mkref(obj)
return self._flatten_obj_instance(obj)
def _flatten_file(self, obj):
"""
Special case file objects
"""
assert not PY3 and isinstance(obj, types.FileType)
return None
def _flatten_bytestring(self, obj):
if PY2:
try:
return obj.decode('utf-8')
except UnicodeDecodeError:
pass
return {self._bytes_tag: self._bytes_encoder(obj)}
def _flatten_obj_instance(self, obj):
"""Recursively flatten an instance and return a json-friendly dict"""
data = {}
has_class = hasattr(obj, '__class__')
has_dict = hasattr(obj, '__dict__')
has_slots = not has_dict and hasattr(obj, '__slots__')
has_getnewargs = util.has_method(obj, '__getnewargs__')
has_getnewargs_ex = util.has_method(obj, '__getnewargs_ex__')
has_getinitargs = util.has_method(obj, '__getinitargs__')
has_reduce, has_reduce_ex = util.has_reduce(obj)
# Support objects with __getstate__(); this ensures that
# both __setstate__() and __getstate__() are implemented
has_getstate = hasattr(obj, '__getstate__')
# not using has_method since __getstate__() is handled separately below
if has_class:
cls = obj.__class__
else:
cls = type(obj)
# Check for a custom handler
class_name = util.importable_name(cls)
handler = handlers.get(cls, handlers.get(class_name))
if handler is not None:
if self.unpicklable:
data[tags.OBJECT] = class_name
return handler(self).flatten(obj, data)
reduce_val = None
if self.unpicklable:
if has_reduce and not has_reduce_ex:
try:
reduce_val = obj.__reduce__()
except TypeError:
# A lot of builtin types have a reduce which
# just raises a TypeError
# we ignore those
pass
# test for a reduce implementation, and redirect before
# doing anything else if that is what reduce requests
elif has_reduce_ex:
try:
# we're implementing protocol 2
reduce_val = obj.__reduce_ex__(2)
except TypeError:
# A lot of builtin types have a reduce which
# just raises a TypeError
# we ignore those
pass
if reduce_val and isinstance(reduce_val, string_types):
try:
varpath = iter(reduce_val.split('.'))
# curmod will be transformed by the
# loop into the value to pickle
curmod = sys.modules[next(varpath)]
for modname in varpath:
curmod = getattr(curmod, modname)
# replace obj with value retrieved
return self._flatten(curmod)
except KeyError:
# well, we can't do anything with that, so we ignore it
pass
elif reduce_val:
# at this point, reduce_val should be some kind of iterable
# pad out to len 5
rv_as_list = list(reduce_val)
insufficiency = 5 - len(rv_as_list)
if insufficiency:
rv_as_list += [None] * insufficiency
if getattr(rv_as_list[0], '__name__', '') == '__newobj__':
rv_as_list[0] = tags.NEWOBJ
f, args, state, listitems, dictitems = rv_as_list
# check that getstate/setstate is sane
if not (
state
and hasattr(obj, '__getstate__')
and not hasattr(obj, '__setstate__')
and not isinstance(obj, dict)
):
# turn iterators to iterables for convenient serialization
if rv_as_list[3]:
rv_as_list[3] = tuple(rv_as_list[3])
if rv_as_list[4]:
rv_as_list[4] = tuple(rv_as_list[4])
reduce_args = list(map(self._flatten, rv_as_list))
last_index = len(reduce_args) - 1
while last_index >= 2 and reduce_args[last_index] is None:
last_index -= 1
data[tags.REDUCE] = reduce_args[: last_index + 1]
return data
if has_class and not util.is_module(obj):
if self.unpicklable:
data[tags.OBJECT] = class_name
if has_getnewargs_ex:
data[tags.NEWARGSEX] = list(map(self._flatten, obj.__getnewargs_ex__()))
if has_getnewargs and not has_getnewargs_ex:
data[tags.NEWARGS] = self._flatten(obj.__getnewargs__())
if has_getinitargs:
data[tags.INITARGS] = self._flatten(obj.__getinitargs__())
if has_getstate:
try:
state = obj.__getstate__()
except TypeError:
# Has getstate but it cannot be called, e.g. file descriptors
# in Python3
self._pickle_warning(obj)
return None
else:
return self._getstate(state, data)
if util.is_module(obj):
if self.unpicklable:
data[tags.REPR] = '{name}/{name}'.format(name=obj.__name__)
else:
data = compat.ustr(obj)
return data
if util.is_dictionary_subclass(obj):
self._flatten_dict_obj(obj, data)
return data
if util.is_sequence_subclass(obj):
return self._flatten_sequence_obj(obj, data)
if util.is_iterator(obj):
# force list in python 3
data[tags.ITERATOR] = list(map(self._flatten, islice(obj, self._max_iter)))
return data
if has_dict:
            # Support objects that subclass list and set
if util.is_sequence_subclass(obj):
return self._flatten_sequence_obj(obj, data)
# hack for zope persistent objects; this unghostifies the object
getattr(obj, '_', None)
return self._flatten_dict_obj(obj.__dict__, data)
if has_slots:
return self._flatten_newstyle_with_slots(obj, data)
# catchall return for data created above without a return
# (e.g. __getnewargs__ is not supposed to be the end of the story)
if data:
return data
self._pickle_warning(obj)
return None
def _flatten_function(self, obj):
if self.unpicklable:
data = {tags.FUNCTION: util.importable_name(obj)}
else:
data = None
return data
def _flatten_dict_obj(self, obj, data=None):
"""Recursively call flatten() and return json-friendly dict"""
if data is None:
data = obj.__class__()
# If we allow non-string keys then we have to do a two-phase
# encoding to ensure that the reference IDs are deterministic.
if self.keys:
# Phase 1: serialize regular objects, ignore fancy keys.
flatten = self._flatten_string_key_value_pair
for k, v in util.items(obj):
flatten(k, v, data)
# Phase 2: serialize non-string keys.
flatten = self._flatten_non_string_key_value_pair
for k, v in util.items(obj):
flatten(k, v, data)
else:
# If we have string keys only then we only need a single pass.
flatten = self._flatten_key_value_pair
for k, v in util.items(obj):
flatten(k, v, data)
# the collections.defaultdict protocol
if hasattr(obj, 'default_factory') and callable(obj.default_factory):
factory = obj.default_factory
if util.is_type(factory):
# Reference the class/type
value = _mktyperef(factory)
else:
# The factory is not a type and could reference e.g. functions
# or even the object instance itself, which creates a cycle.
if self._mkref(factory):
# We've never seen this object before so pickle it in-place.
# Create an instance from the factory and assume that the
                    # resulting instance is a suitable exemplar.
value = self._flatten_obj_instance(handlers.CloneFactory(factory()))
else:
# We've seen this object before.
# Break the cycle by emitting a reference.
value = self._getref(factory)
data['default_factory'] = value
# Sub-classes of dict
if hasattr(obj, '__dict__') and self.unpicklable:
dict_data = {}
self._flatten_dict_obj(obj.__dict__, dict_data)
data['__dict__'] = dict_data
return data
def _flatten_obj_attrs(self, obj, attrs, data):
flatten = self._flatten_key_value_pair
ok = False
for k in attrs:
try:
value = getattr(obj, k)
flatten(k, value, data)
except AttributeError:
# The attribute may have been deleted
continue
ok = True
return ok
def _flatten_newstyle_with_slots(self, obj, data):
"""Return a json-friendly dict for new-style objects with __slots__."""
allslots = [
_wrap_string_slot(getattr(cls, '__slots__', tuple()))
for cls in obj.__class__.mro()
]
if not self._flatten_obj_attrs(obj, chain(*allslots), data):
attrs = [
x for x in dir(obj) if not x.startswith('__') and not x.endswith('__')
]
self._flatten_obj_attrs(obj, attrs, data)
return data
def _flatten_key_value_pair(self, k, v, data):
"""Flatten a key/value pair into the passed-in dictionary."""
if not util.is_picklable(k, v):
return data
if k is None:
k = 'null' # for compatibility with common json encoders
if self.numeric_keys and isinstance(k, numeric_types):
pass
elif not isinstance(k, string_types):
try:
k = repr(k)
except Exception:
k = compat.ustr(k)
data[k] = self._flatten(v)
return data
def _flatten_non_string_key_value_pair(self, k, v, data):
"""Flatten only non-string key/value pairs"""
if not util.is_picklable(k, v):
return data
if self.keys and not isinstance(k, string_types):
k = self._escape_key(k)
data[k] = self._flatten(v)
return data
def _flatten_string_key_value_pair(self, k, v, data):
"""Flatten string key/value pairs only."""
if not util.is_picklable(k, v):
return data
if self.keys:
if not isinstance(k, string_types):
return data
elif k.startswith(tags.JSON_KEY):
k = self._escape_key(k)
else:
if k is None:
k = 'null' # for compatibility with common json encoders
if self.numeric_keys and isinstance(k, numeric_types):
pass
elif not isinstance(k, string_types):
try:
k = repr(k)
except Exception:
k = compat.ustr(k)
data[k] = self._flatten(v)
return data
def _flatten_sequence_obj(self, obj, data):
"""Return a json-friendly dict for a sequence subclass."""
if hasattr(obj, '__dict__'):
self._flatten_dict_obj(obj.__dict__, data)
value = [self._flatten(v) for v in obj]
if self.unpicklable:
data[tags.SEQ] = value
else:
return value
return data
def _escape_key(self, k):
return tags.JSON_KEY + encode(
k,
reset=False,
keys=True,
context=self,
backend=self.backend,
make_refs=self.make_refs,
)
def _getstate(self, obj, data):
state = self._flatten(obj)
if self.unpicklable:
data[tags.STATE] = state
else:
data = state
return data
def _pickle_warning(self, obj):
if self.warn:
msg = 'jsonpickle cannot pickle %r: replaced with None' % obj
warnings.warn(msg)
def _in_cycle(obj, objs, max_reached, make_refs):
"""Detect cyclic structures that would lead to infinite recursion"""
return (
(max_reached or (not make_refs and id(obj) in objs))
and not util.is_primitive(obj)
and not util.is_enum(obj)
)
def _mktyperef(obj):
"""Return a typeref dictionary
>>> _mktyperef(AssertionError) == {'py/type': 'builtins.AssertionError'}
True
"""
return {tags.TYPE: util.importable_name(obj)}
def _wrap_string_slot(string):
"""Converts __slots__ = 'a' into __slots__ = ('a',)"""
if isinstance(string, string_types):
return (string,)
return string
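# --- Illustrative sketch (not part of the original module) ---
# Demonstrates the reference bookkeeping done by _mkref()/_getref(): when the
# same list appears twice, the second occurrence is flattened to a small
# {tags.ID: ...} reference instead of being serialized again. The exact ID
# numbering is an implementation detail.
def _demo_shared_references():
    shared = ['a', 'b']
    p = Pickler()
    flat = p.flatten([shared, shared])
    # flat[0] is the fully flattened list; flat[1] should be a tags.ID entry
    # when unpicklable and make_refs are left at their defaults.
    return flat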
|
def flatten(self, obj, reset=True):
"""Takes an object and returns a JSON-safe representation of it.
Simply returns any of the basic builtin datatypes
>>> p = Pickler()
>>> p.flatten('hello world') == 'hello world'
True
>>> p.flatten(49)
49
>>> p.flatten(350.0)
350.0
>>> p.flatten(True)
True
>>> p.flatten(False)
False
>>> r = p.flatten(None)
>>> r is None
True
>>> p.flatten(False)
False
>>> p.flatten([1, 2, 3, 4])
[1, 2, 3, 4]
>>> p.flatten((1,2,))[tags.TUPLE]
[1, 2]
>>> p.flatten({'key': 'value'}) == {'key': 'value'}
True
"""
if reset:
self.reset()
return self._flatten(obj)
| 222 | 252 |
# Copyright (C) 2008 John Paulett (john -at- paulett.org)
# Copyright (C) 2009-2018 David Aguilar (davvid -at- gmail.com)
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
from __future__ import absolute_import, division, unicode_literals
import decimal
import warnings
import sys
import types
from itertools import chain, islice
from . import compat
from . import util
from . import tags
from . import handlers
from .backend import json
from .compat import numeric_types, string_types, PY3, PY2
def encode(
value,
unpicklable=True,
make_refs=True,
keys=False,
max_depth=None,
reset=True,
backend=None,
warn=False,
context=None,
max_iter=None,
use_decimal=False,
numeric_keys=False,
use_base85=False,
fail_safe=None,
indent=None,
separators=None,
):
"""Return a JSON formatted representation of value, a Python object.
:param unpicklable: If set to False then the output will not contain the
information necessary to turn the JSON data back into Python objects,
but a simpler JSON stream is produced.
:param max_depth: If set to a non-negative integer then jsonpickle will
not recurse deeper than 'max_depth' steps into the object. Anything
deeper than 'max_depth' is represented using a Python repr() of the
object.
:param make_refs: If set to False jsonpickle's referencing support is
disabled. Objects that are id()-identical won't be preserved across
encode()/decode(), but the resulting JSON stream will be conceptually
simpler. jsonpickle detects cyclical objects and will break the cycle
by calling repr() instead of recursing when make_refs is set False.
:param keys: If set to True then jsonpickle will encode non-string
dictionary keys instead of coercing them into strings via `repr()`.
        This is typically what you want if you need to support integers or
        arbitrary objects as dictionary keys.
:param numeric_keys: Only use this option if the backend supports integer
dict keys natively. This flag tells jsonpickle to leave numeric keys
as-is rather than conforming them to json-friendly strings.
Using ``keys=True`` is the typical solution for integer keys, so only
use this if you have a specific use case where you want to allow the
backend to handle serialization of numeric dict keys.
:param warn: If set to True then jsonpickle will warn when it
returns None for an object which it cannot pickle
(e.g. file descriptors).
:param max_iter: If set to a non-negative integer then jsonpickle will
consume at most `max_iter` items when pickling iterators.
:param use_decimal: If set to True jsonpickle will allow Decimal
instances to pass-through, with the assumption that the simplejson
backend will be used in `use_decimal` mode. In order to use this mode
you will need to configure simplejson::
jsonpickle.set_encoder_options('simplejson',
use_decimal=True, sort_keys=True)
jsonpickle.set_decoder_options('simplejson',
use_decimal=True)
jsonpickle.set_preferred_backend('simplejson')
NOTE: A side-effect of the above settings is that float values will be
converted to Decimal when converting to json.
:param use_base85:
        If possible, use base85 to encode binary data. Base85 inflates binary
        data by about 1/4, whereas base64 expands it by about 1/3. This argument
        is ignored on Python 2, which does not support base85.
    :param fail_safe: If set to a function, exceptions are ignored while
        pickling; when an exception occurs, the function is called with the
        exception and its return value is used in place of the object that
        caused the error.
:param indent: When `indent` is a non-negative integer, then JSON array
elements and object members will be pretty-printed with that indent
level. An indent level of 0 will only insert newlines. ``None`` is
the most compact representation. Since the default item separator is
``(', ', ': ')``, the output might include trailing whitespace when
``indent`` is specified. You can use ``separators=(',', ': ')`` to
avoid this. This value is passed directly to the active JSON backend
library and not used by jsonpickle directly.
:param separators:
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')``
separators. ``(',', ':')`` is the most compact JSON representation.
This value is passed directly to the active JSON backend library and
not used by jsonpickle directly.
>>> encode('my string') == '"my string"'
True
>>> encode(36) == '36'
True
>>> encode({'foo': True}) == '{"foo": true}'
True
>>> encode({'foo': [1, 2, [3, 4]]}, max_depth=1)
'{"foo": "[1, 2, [3, 4]]"}'
"""
backend = backend or json
context = context or Pickler(
unpicklable=unpicklable,
make_refs=make_refs,
keys=keys,
backend=backend,
max_depth=max_depth,
warn=warn,
max_iter=max_iter,
numeric_keys=numeric_keys,
use_decimal=use_decimal,
use_base85=use_base85,
fail_safe=fail_safe,
)
return backend.encode(
context.flatten(value, reset=reset), indent=indent, separators=separators
)
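# --- Illustrative sketch (not part of the original module) ---
# Shows the effect of the keys option described above. By default, non-string
# dictionary keys are coerced to strings via repr(); with keys=True they are
# escaped so they can be restored later. The round-trip below assumes the
# public jsonpickle.decode() API with its matching keys flag.
def _demo_non_string_keys():
    import jsonpickle

    data = {1: 'one', (2, 3): 'pair'}
    coerced = encode(data)               # keys become strings such as "1"
    preserved = encode(data, keys=True)  # keys are escaped for later recovery
    restored = jsonpickle.decode(preserved, keys=True)
    return coerced, preserved, restored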
class Pickler(object):
def __init__(
self,
unpicklable=True,
make_refs=True,
max_depth=None,
backend=None,
keys=False,
warn=False,
max_iter=None,
numeric_keys=False,
use_decimal=False,
use_base85=False,
fail_safe=None,
):
self.unpicklable = unpicklable
self.make_refs = make_refs
self.backend = backend or json
self.keys = keys
self.warn = warn
self.numeric_keys = numeric_keys
self.use_base85 = use_base85 and (not PY2)
# The current recursion depth
self._depth = -1
# The maximal recursion depth
self._max_depth = max_depth
# Maps id(obj) to reference IDs
self._objs = {}
# Avoids garbage collection
self._seen = []
        # maximum number of items to take from a pickled iterator
self._max_iter = max_iter
# Whether to allow decimals to pass-through
self._use_decimal = use_decimal
if self.use_base85:
self._bytes_tag = tags.B85
self._bytes_encoder = util.b85encode
else:
self._bytes_tag = tags.B64
self._bytes_encoder = util.b64encode
# ignore exceptions
self.fail_safe = fail_safe
def reset(self):
self._objs = {}
self._depth = -1
self._seen = []
def _push(self):
"""Steps down one level in the namespace."""
self._depth += 1
def _pop(self, value):
"""Step up one level in the namespace and return the value.
If we're at the root, reset the pickler's state.
"""
self._depth -= 1
if self._depth == -1:
self.reset()
return value
def _log_ref(self, obj):
"""
Log a reference to an in-memory object.
Return True if this object is new and was assigned
a new ID. Otherwise return False.
"""
objid = id(obj)
is_new = objid not in self._objs
if is_new:
new_id = len(self._objs)
self._objs[objid] = new_id
return is_new
def _mkref(self, obj):
"""
Log a reference to an in-memory object, and return
if that object should be considered newly logged.
"""
is_new = self._log_ref(obj)
# Pretend the object is new
pretend_new = not self.unpicklable or not self.make_refs
return pretend_new or is_new
def _getref(self, obj):
return {tags.ID: self._objs.get(id(obj))}
def flatten(self, obj, reset=True):
"""Takes an object and returns a JSON-safe representation of it.
Simply returns any of the basic builtin datatypes
>>> p = Pickler()
>>> p.flatten('hello world') == 'hello world'
True
>>> p.flatten(49)
49
>>> p.flatten(350.0)
350.0
>>> p.flatten(True)
True
>>> p.flatten(False)
False
>>> r = p.flatten(None)
>>> r is None
True
>>> p.flatten(False)
False
>>> p.flatten([1, 2, 3, 4])
[1, 2, 3, 4]
>>> p.flatten((1,2,))[tags.TUPLE]
[1, 2]
>>> p.flatten({'key': 'value'}) == {'key': 'value'}
True
"""
if reset:
self.reset()
return self._flatten(obj)
def _flatten(self, obj):
#########################################
# if obj is nonrecursive return immediately
# for performance reasons we don't want to do recursive checks
if PY2 and isinstance(obj, types.FileType):
return self._flatten_file(obj)
if util.is_bytes(obj):
return self._flatten_bytestring(obj)
if util.is_primitive(obj):
return obj
# Decimal is a primitive when use_decimal is True
if self._use_decimal and isinstance(obj, decimal.Decimal):
return obj
#########################################
self._push()
return self._pop(self._flatten_obj(obj))
def _max_reached(self):
return self._depth == self._max_depth
def _flatten_obj(self, obj):
self._seen.append(obj)
max_reached = self._max_reached()
try:
in_cycle = _in_cycle(obj, self._objs, max_reached, self.make_refs)
if in_cycle:
# break the cycle
flatten_func = repr
else:
flatten_func = self._get_flattener(obj)
if flatten_func is None:
self._pickle_warning(obj)
return None
return flatten_func(obj)
except (KeyboardInterrupt, SystemExit) as e:
raise e
except Exception as e:
if self.fail_safe is None:
raise e
else:
return self.fail_safe(e)
def _list_recurse(self, obj):
return [self._flatten(v) for v in obj]
def _get_flattener(self, obj):
list_recurse = self._list_recurse
if util.is_list(obj):
if self._mkref(obj):
return list_recurse
else:
self._push()
return self._getref
# We handle tuples and sets by encoding them in a "(tuple|set)dict"
if util.is_tuple(obj):
if not self.unpicklable:
return list_recurse
return lambda obj: {tags.TUPLE: [self._flatten(v) for v in obj]}
if util.is_set(obj):
if not self.unpicklable:
return list_recurse
return lambda obj: {tags.SET: [self._flatten(v) for v in obj]}
if util.is_dictionary(obj):
return self._flatten_dict_obj
if util.is_type(obj):
return _mktyperef
if util.is_object(obj):
return self._ref_obj_instance
if util.is_module_function(obj):
return self._flatten_function
# instance methods, lambdas, old style classes...
self._pickle_warning(obj)
return None
def _ref_obj_instance(self, obj):
"""Reference an existing object or flatten if new"""
if self.unpicklable:
if self._mkref(obj):
# We've never seen this object so return its
# json representation.
return self._flatten_obj_instance(obj)
# We've seen this object before so place an object
# reference tag in the data. This avoids infinite recursion
# when processing cyclical objects.
return self._getref(obj)
else:
max_reached = self._max_reached()
in_cycle = _in_cycle(obj, self._objs, max_reached, False)
if in_cycle:
                # A circular reference becomes None.
return None
self._mkref(obj)
return self._flatten_obj_instance(obj)
def _flatten_file(self, obj):
"""
Special case file objects
"""
assert not PY3 and isinstance(obj, types.FileType)
return None
def _flatten_bytestring(self, obj):
if PY2:
try:
return obj.decode('utf-8')
except UnicodeDecodeError:
pass
return {self._bytes_tag: self._bytes_encoder(obj)}
def _flatten_obj_instance(self, obj):
"""Recursively flatten an instance and return a json-friendly dict"""
data = {}
has_class = hasattr(obj, '__class__')
has_dict = hasattr(obj, '__dict__')
has_slots = not has_dict and hasattr(obj, '__slots__')
has_getnewargs = util.has_method(obj, '__getnewargs__')
has_getnewargs_ex = util.has_method(obj, '__getnewargs_ex__')
has_getinitargs = util.has_method(obj, '__getinitargs__')
has_reduce, has_reduce_ex = util.has_reduce(obj)
# Support objects with __getstate__(); this ensures that
# both __setstate__() and __getstate__() are implemented
has_getstate = hasattr(obj, '__getstate__')
# not using has_method since __getstate__() is handled separately below
if has_class:
cls = obj.__class__
else:
cls = type(obj)
# Check for a custom handler
class_name = util.importable_name(cls)
handler = handlers.get(cls, handlers.get(class_name))
if handler is not None:
if self.unpicklable:
data[tags.OBJECT] = class_name
return handler(self).flatten(obj, data)
reduce_val = None
if self.unpicklable:
if has_reduce and not has_reduce_ex:
try:
reduce_val = obj.__reduce__()
except TypeError:
# A lot of builtin types have a reduce which
# just raises a TypeError
# we ignore those
pass
# test for a reduce implementation, and redirect before
# doing anything else if that is what reduce requests
elif has_reduce_ex:
try:
# we're implementing protocol 2
reduce_val = obj.__reduce_ex__(2)
except TypeError:
# A lot of builtin types have a reduce which
# just raises a TypeError
# we ignore those
pass
if reduce_val and isinstance(reduce_val, string_types):
try:
varpath = iter(reduce_val.split('.'))
# curmod will be transformed by the
# loop into the value to pickle
curmod = sys.modules[next(varpath)]
for modname in varpath:
curmod = getattr(curmod, modname)
# replace obj with value retrieved
return self._flatten(curmod)
except KeyError:
# well, we can't do anything with that, so we ignore it
pass
elif reduce_val:
# at this point, reduce_val should be some kind of iterable
# pad out to len 5
rv_as_list = list(reduce_val)
insufficiency = 5 - len(rv_as_list)
if insufficiency:
rv_as_list += [None] * insufficiency
if getattr(rv_as_list[0], '__name__', '') == '__newobj__':
rv_as_list[0] = tags.NEWOBJ
f, args, state, listitems, dictitems = rv_as_list
# check that getstate/setstate is sane
if not (
state
and hasattr(obj, '__getstate__')
and not hasattr(obj, '__setstate__')
and not isinstance(obj, dict)
):
# turn iterators to iterables for convenient serialization
if rv_as_list[3]:
rv_as_list[3] = tuple(rv_as_list[3])
if rv_as_list[4]:
rv_as_list[4] = tuple(rv_as_list[4])
reduce_args = list(map(self._flatten, rv_as_list))
last_index = len(reduce_args) - 1
while last_index >= 2 and reduce_args[last_index] is None:
last_index -= 1
data[tags.REDUCE] = reduce_args[: last_index + 1]
return data
if has_class and not util.is_module(obj):
if self.unpicklable:
data[tags.OBJECT] = class_name
if has_getnewargs_ex:
data[tags.NEWARGSEX] = list(map(self._flatten, obj.__getnewargs_ex__()))
if has_getnewargs and not has_getnewargs_ex:
data[tags.NEWARGS] = self._flatten(obj.__getnewargs__())
if has_getinitargs:
data[tags.INITARGS] = self._flatten(obj.__getinitargs__())
if has_getstate:
try:
state = obj.__getstate__()
except TypeError:
# Has getstate but it cannot be called, e.g. file descriptors
# in Python3
self._pickle_warning(obj)
return None
else:
return self._getstate(state, data)
if util.is_module(obj):
if self.unpicklable:
data[tags.REPR] = '{name}/{name}'.format(name=obj.__name__)
else:
data = compat.ustr(obj)
return data
if util.is_dictionary_subclass(obj):
self._flatten_dict_obj(obj, data)
return data
if util.is_sequence_subclass(obj):
return self._flatten_sequence_obj(obj, data)
if util.is_iterator(obj):
# force list in python 3
data[tags.ITERATOR] = list(map(self._flatten, islice(obj, self._max_iter)))
return data
if has_dict:
            # Support objects that subclass list and set
if util.is_sequence_subclass(obj):
return self._flatten_sequence_obj(obj, data)
# hack for zope persistent objects; this unghostifies the object
getattr(obj, '_', None)
return self._flatten_dict_obj(obj.__dict__, data)
if has_slots:
return self._flatten_newstyle_with_slots(obj, data)
# catchall return for data created above without a return
# (e.g. __getnewargs__ is not supposed to be the end of the story)
if data:
return data
self._pickle_warning(obj)
return None
def _flatten_function(self, obj):
if self.unpicklable:
data = {tags.FUNCTION: util.importable_name(obj)}
else:
data = None
return data
def _flatten_dict_obj(self, obj, data=None):
"""Recursively call flatten() and return json-friendly dict"""
if data is None:
data = obj.__class__()
# If we allow non-string keys then we have to do a two-phase
# encoding to ensure that the reference IDs are deterministic.
if self.keys:
# Phase 1: serialize regular objects, ignore fancy keys.
flatten = self._flatten_string_key_value_pair
for k, v in util.items(obj):
flatten(k, v, data)
# Phase 2: serialize non-string keys.
flatten = self._flatten_non_string_key_value_pair
for k, v in util.items(obj):
flatten(k, v, data)
else:
# If we have string keys only then we only need a single pass.
flatten = self._flatten_key_value_pair
for k, v in util.items(obj):
flatten(k, v, data)
# the collections.defaultdict protocol
if hasattr(obj, 'default_factory') and callable(obj.default_factory):
factory = obj.default_factory
if util.is_type(factory):
# Reference the class/type
value = _mktyperef(factory)
else:
# The factory is not a type and could reference e.g. functions
# or even the object instance itself, which creates a cycle.
if self._mkref(factory):
# We've never seen this object before so pickle it in-place.
# Create an instance from the factory and assume that the
                    # resulting instance is a suitable exemplar.
value = self._flatten_obj_instance(handlers.CloneFactory(factory()))
else:
# We've seen this object before.
# Break the cycle by emitting a reference.
value = self._getref(factory)
data['default_factory'] = value
# Sub-classes of dict
if hasattr(obj, '__dict__') and self.unpicklable:
dict_data = {}
self._flatten_dict_obj(obj.__dict__, dict_data)
data['__dict__'] = dict_data
return data
def _flatten_obj_attrs(self, obj, attrs, data):
flatten = self._flatten_key_value_pair
ok = False
for k in attrs:
try:
value = getattr(obj, k)
flatten(k, value, data)
except AttributeError:
# The attribute may have been deleted
continue
ok = True
return ok
def _flatten_newstyle_with_slots(self, obj, data):
"""Return a json-friendly dict for new-style objects with __slots__."""
allslots = [
_wrap_string_slot(getattr(cls, '__slots__', tuple()))
for cls in obj.__class__.mro()
]
if not self._flatten_obj_attrs(obj, chain(*allslots), data):
attrs = [
x for x in dir(obj) if not x.startswith('__') and not x.endswith('__')
]
self._flatten_obj_attrs(obj, attrs, data)
return data
def _flatten_key_value_pair(self, k, v, data):
"""Flatten a key/value pair into the passed-in dictionary."""
if not util.is_picklable(k, v):
return data
if k is None:
k = 'null' # for compatibility with common json encoders
if self.numeric_keys and isinstance(k, numeric_types):
pass
elif not isinstance(k, string_types):
try:
k = repr(k)
except Exception:
k = compat.ustr(k)
data[k] = self._flatten(v)
return data
def _flatten_non_string_key_value_pair(self, k, v, data):
"""Flatten only non-string key/value pairs"""
if not util.is_picklable(k, v):
return data
if self.keys and not isinstance(k, string_types):
k = self._escape_key(k)
data[k] = self._flatten(v)
return data
def _flatten_string_key_value_pair(self, k, v, data):
"""Flatten string key/value pairs only."""
if not util.is_picklable(k, v):
return data
if self.keys:
if not isinstance(k, string_types):
return data
elif k.startswith(tags.JSON_KEY):
k = self._escape_key(k)
else:
if k is None:
k = 'null' # for compatibility with common json encoders
if self.numeric_keys and isinstance(k, numeric_types):
pass
elif not isinstance(k, string_types):
try:
k = repr(k)
except Exception:
k = compat.ustr(k)
data[k] = self._flatten(v)
return data
def _flatten_sequence_obj(self, obj, data):
"""Return a json-friendly dict for a sequence subclass."""
if hasattr(obj, '__dict__'):
self._flatten_dict_obj(obj.__dict__, data)
value = [self._flatten(v) for v in obj]
if self.unpicklable:
data[tags.SEQ] = value
else:
return value
return data
def _escape_key(self, k):
return tags.JSON_KEY + encode(
k,
reset=False,
keys=True,
context=self,
backend=self.backend,
make_refs=self.make_refs,
)
def _getstate(self, obj, data):
state = self._flatten(obj)
if self.unpicklable:
data[tags.STATE] = state
else:
data = state
return data
def _pickle_warning(self, obj):
if self.warn:
msg = 'jsonpickle cannot pickle %r: replaced with None' % obj
warnings.warn(msg)
def _in_cycle(obj, objs, max_reached, make_refs):
"""Detect cyclic structures that would lead to infinite recursion"""
return (
(max_reached or (not make_refs and id(obj) in objs))
and not util.is_primitive(obj)
and not util.is_enum(obj)
)
def _mktyperef(obj):
"""Return a typeref dictionary
>>> _mktyperef(AssertionError) == {'py/type': 'builtins.AssertionError'}
True
"""
return {tags.TYPE: util.importable_name(obj)}
def _wrap_string_slot(string):
"""Converts __slots__ = 'a' into __slots__ = ('a',)"""
if isinstance(string, string_types):
return (string,)
return string
|
download_pdc_id
|
Download a PDC dataset by its PDC study id.
Returns:
pandas.DataFrame: The clinical table for the study id.
pandas.DataFrame: The quantitative table for the study id.
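A minimal usage sketch (assumes download_pdc_id is imported from this module;
the study ID is taken from the STUDY_IDS_MAP defined alongside it):

    clin, quant = download_pdc_id("PDC000121")  # Prospective BRCA Phosphoproteome
    clin.head()
    quant.head()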
|
# Copyright 2018 Samuel Payne [email protected]
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pandas as pd
import requests
import shutil
import warnings
import cptac
from cptac.file_download import get_box_token
from cptac.exceptions import DatasetAlreadyInstalledWarning, InvalidParameterError, NoInternetError, PdcDownloadError
from .pancanbrca import SOURCES as BRCA_SOURCES
from .pancanccrcc import SOURCES as CCRCC_SOURCES
from .pancancoad import SOURCES as COAD_SOURCES
from .pancangbm import SOURCES as GBM_SOURCES
from .pancanhnscc import SOURCES as HNSCC_SOURCES
from .pancanlscc import SOURCES as LSCC_SOURCES
from .pancanluad import SOURCES as LUAD_SOURCES
from .pancanov import SOURCES as OV_SOURCES
from .pancanucec import SOURCES as UCEC_SOURCES
from .pancanpdac import SOURCES as PDAC_SOURCES
STUDY_IDS_MAP = {
"pdcbrca": {
"acetylome": "PDC000239", # Prospective Breast BI Acetylome
"phosphoproteome": "PDC000121", # Prospective BRCA Phosphoproteome S039-2
"proteome": "PDC000120", # Prospective BRCA Proteome S039-1
},
"pdcccrcc": {
"phosphoproteome": "PDC000128", # CPTAC CCRCC Discovery Study - Phosphoproteme S044-2
"proteome": "PDC000127", # CPTAC CCRCC Discovery Study - Proteome S044-1
},
"pdccoad": {
"phosphoproteome": "PDC000117", # Prospective COAD Phosphoproteome S037-3
"proteome": "PDC000116", # Prospective COAD Proteome S037-2
},
"pdcgbm": {
"acetylome": "PDC000245", # CPTAC GBM Discovery Study - Acetylome
"phosphoproteome": "PDC000205", # CPTAC GBM Discovery Study - Phosphoproteome
"proteome": "PDC000204", # CPTAC GBM Discovery Study - Proteome
},
"pdchnscc": {
"phosphoproteome": "PDC000222", # CPTAC HNSCC Discovery Study - Phosphoproteome
"proteome": "PDC000221", # CPTAC HNSCC Discovery Study - Proteome
},
"pdclscc": {
"acetylome": "PDC000233", # CPTAC LSCC Discovery Study - Acetylome
"phosphoproteome": "PDC000232", # CPTAC LSCC Discovery Study - Phosphoproteome
"proteome": "PDC000234", # CPTAC LSCC Discovery Study - Proteome
"ubiquitylome": "PDC000237", # CPTAC LSCC Discovery Study - Ubiquitylome
},
"pdcluad": {
"acetylome": "PDC000224", # CPTAC LUAD Discovery Study - Acetylome
"phosphoproteome": "PDC000149", # CPTAC LUAD Discovery Study - Phosphoproteome
"proteome": "PDC000153", # CPTAC LUAD Discovery Study - Proteome
},
"pdcov": {
"phosphoproteome": "PDC000119", # Prospective OV Phosphoproteome S038-3
"proteome": "PDC000118", # Prospective OV Proteome S038-2
},
"pdcpdac": {
"proteome": "PDC000270", # CPTAC PDAC Discovery Study - Proteome
"phosphoproteome": "PDC000271", # CPTAC PDAC Discovery Study - Phosphoproteome
},
"pdcucec": {
"acetylome": "PDC000226", # CPTAC UCEC Discovery Study - Acetylome
"phosphoproteome": "PDC000126", # UCEC Discovery - Phosphoproteome S043-2
"proteome": "PDC000125", # UCEC Discovery - Proteome S043-1
},
}
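# --- Illustrative sketch (not part of the original module) ---
# Each entry maps a dataset name to its PDC study IDs per data type; a lookup
# is a plain nested dictionary access.
def _demo_lookup_study_id(dataset="pdcgbm", data_type="proteome"):
    return STUDY_IDS_MAP[dataset][data_type]  # "PDC000204" for the defaults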
def download(dataset, version="latest", redownload=False):
dataset = dataset.lower()
if dataset.startswith("pdc"):
box_token = get_box_token()
if dataset != 'pdcbrca': # pdcbrca is the only dataset that doesn't need a mapping file for PDC
mapping = cptac.download(dataset, version=version, redownload=redownload, _box_auth=True, _box_token=box_token) # download helper file for mapping aliquots to patient IDs
omics = _pdc_download(dataset, version=version, redownload=redownload)
if omics and mapping:
return True
else:
return False
else: # pdcbrca only needs omics
omics = _pdc_download(dataset, version=version, redownload=redownload)
if omics:
return True
else:
return False
elif dataset.startswith("pancan") or dataset == "all":
box_token = get_box_token()
if dataset == "pancanbrca":
sources = BRCA_SOURCES
elif dataset == "pancanccrcc":
sources = CCRCC_SOURCES
elif dataset == "pancancoad":
sources = COAD_SOURCES
elif dataset == "pancangbm":
sources = GBM_SOURCES
elif dataset == "pancanhnscc":
sources = HNSCC_SOURCES
elif dataset == "pancanlscc":
sources = LSCC_SOURCES
elif dataset == "pancanluad":
sources = LUAD_SOURCES
elif dataset == "pancanov":
sources = OV_SOURCES
elif dataset == "pancanucec":
sources = UCEC_SOURCES
elif dataset == "pancanpdac":
sources = PDAC_SOURCES
elif dataset == "all":
sources = sorted(set(BRCA_SOURCES + CCRCC_SOURCES + COAD_SOURCES + GBM_SOURCES + HNSCC_SOURCES + LSCC_SOURCES + LUAD_SOURCES + OV_SOURCES + UCEC_SOURCES + PDAC_SOURCES))
else:
raise InvalidParameterError(f"{dataset} is not a valid dataset.")
overall_success = True
for source in sources:
if source.startswith("pdc"):
single_success = download(source, version=version, redownload=redownload)
else:
single_success = cptac.download(source, version=version, redownload=redownload, _box_auth=True, _box_token=box_token)
if not single_success:
overall_success = False
return overall_success
else:
return cptac.download(dataset, version=version, redownload=redownload, _box_auth=True)
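# --- Illustrative usage sketch (not part of the original module) ---
# download() dispatches on the dataset prefix: "pdc..." names fetch a single
# PDC dataset (plus a Box-hosted mapping file for all but pdcbrca), while
# "pancan..." names fan out to every source registered for that cancer type.
def _demo_download_usage():
    ok_pdc = download("pdcbrca")       # single PDC dataset, omics only
    ok_pancan = download("pancangbm")  # every source listed in GBM_SOURCES
    return ok_pdc and ok_pancan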
# MASKED: download_pdc_id function (lines 147-204)
def list_pdc_datasets():
for dataset in STUDY_IDS_MAP.keys():
print(f"Pdc{dataset[3:].title()}:")
for data_type in STUDY_IDS_MAP[dataset].keys():
print(f"\t{data_type}: {STUDY_IDS_MAP[dataset][data_type]}")
# Helper functions
def _pdc_download(dataset, version, redownload):
"""Download data for the specified cancer type from the PDC."""
dataset = str.lower(dataset)
if dataset == "pdcall":
overall_result = True
for dataset in STUDY_IDS_MAP.keys():
            if not _pdc_download(dataset, version, redownload):
overall_result = False
return overall_result
if not dataset.startswith("pdc"):
raise InvalidParameterError(f"pdc_download function can only be used for PDC datasets, which start with the prefix 'pdc'. You tried to download '{dataset}'.")
if dataset not in STUDY_IDS_MAP.keys():
raise InvalidParameterError(f"PDC dataset must be one of the following:\n{list(STUDY_IDS_MAP.keys())}\nYou passed '{dataset}'.")
dataset_ids = STUDY_IDS_MAP[dataset]
# Get the directory to where to store the data, and see if it exists
path_here = os.path.abspath(os.path.dirname(__file__))
cancer_dir = os.path.join(path_here, f"data_{dataset}")
if os.path.isdir(cancer_dir):
index_path = os.path.join(cancer_dir, "index.txt")
# Check that they also have the index
if not os.path.isfile(index_path):
redownload = True
else:
            # The PDC doesn't have a versioning scheme for the tables they serve, so originally
            # we just called it version 0.0 but later decided it would be better to call it 1.0.
            # So, check if theirs is called 0.0; if so, replace it with 1.0.
with open(index_path, "r") as index_file:
first_line = index_file.readline()
if first_line.startswith("#0.0"):
redownload=True
if redownload:
shutil.rmtree(cancer_dir)
else:
return True
os.mkdir(cancer_dir)
data_dir = os.path.join(cancer_dir, f"{dataset}_v1.0")
os.mkdir(data_dir)
# We'll combine all the clinical tables in case there are differences
master_clin = pd.DataFrame()
for data_type in dataset_ids.keys():
# Print an update
download_msg = f"Downloading {dataset} {data_type} files..."
print(download_msg, end="\r")
# Get the clinical and quantitative tables for the study ID
clin, quant = download_pdc_id(dataset_ids[data_type], _download_msg=False)
# Print a new update
print(" " * len(download_msg), end="\r")
save_msg = f"Saving {dataset} {data_type} files..."
print(save_msg, end="\r")
# Append the clinical dataframe
master_clin = master_clin.append(clin)
# Save the quantitative table
quant.to_csv(os.path.join(data_dir, f"{data_type}.tsv.gz"), sep="\t")
# Erase update
print(" " * len(save_msg), end="\r")
# Print an update
save_msg = f"Saving {dataset} clinical file..."
print(save_msg, end="\r")
# Drop any duplicated rows in combined clinical table, then save it too
master_clin = master_clin.drop_duplicates(keep="first")
master_clin.to_csv(os.path.join(data_dir, "clinical.tsv.gz"), sep="\t")
# Write a dummy index with just version numbers
index_path = os.path.join(cancer_dir, "index.txt")
with open(index_path, "w") as index_file:
index_file.write("#1.0\n")
# Erase update
print(" " * len(save_msg), end="\r")
return True
def _download_study_clin(pdc_study_id):
"""Download PDC clinical data for a particular study."""
clinical_query = '''
query {
clinicalPerStudy(pdc_study_id: "''' + pdc_study_id + '''", acceptDUA: true) {
age_at_diagnosis, ajcc_clinical_m, ajcc_clinical_n, ajcc_clinical_stage, ajcc_clinical_t, ajcc_pathologic_m,
ajcc_pathologic_n, ajcc_pathologic_stage, ajcc_pathologic_t, ann_arbor_b_symptoms, ann_arbor_clinical_stage,
ann_arbor_extranodal_involvement, ann_arbor_pathologic_stage, best_overall_response, burkitt_lymphoma_clinical_variant,
case_id, case_submitter_id, cause_of_death, circumferential_resection_margin, classification_of_tumor, colon_polyps_history,
days_to_best_overall_response, days_to_birth, days_to_death, days_to_diagnosis, days_to_hiv_diagnosis, days_to_last_follow_up,
days_to_last_known_disease_status, days_to_new_event, days_to_recurrence, demographic_id, demographic_submitter_id,
diagnosis_id, diagnosis_submitter_id, disease_type, ethnicity, figo_stage, gender, hiv_positive, hpv_positive_type, hpv_status,
icd_10_code, iss_stage, last_known_disease_status, laterality, ldh_level_at_diagnosis, ldh_normal_range_upper,
lymphatic_invasion_present, lymph_nodes_positive, method_of_diagnosis, morphology, new_event_anatomic_site, new_event_type,
overall_survival, perineural_invasion_present, primary_diagnosis, primary_site, prior_malignancy, prior_treatment,
progression_free_survival, progression_free_survival_event, progression_or_recurrence, race, residual_disease,
site_of_resection_or_biopsy, status, synchronous_malignancy, tissue_or_organ_of_origin, tumor_cell_content, tumor_grade,
tumor_stage, vascular_invasion_present, vital_status, year_of_birth, year_of_death, year_of_diagnosis
}
}
'''
result_json = _query_pdc(clinical_query)
result_df = pd.\
DataFrame(result_json["data"]["clinicalPerStudy"])
return result_df
def _download_study_biospecimen(pdc_study_id):
"""Download PDC biospecimen data for a particular study."""
biospecimen_query = '''
query {
biospecimenPerStudy(pdc_study_id: "''' + pdc_study_id + '''", acceptDUA: true) {
aliquot_submitter_id
case_submitter_id
}
}
'''
result_json = _query_pdc(biospecimen_query)
result_df = pd.\
DataFrame(result_json["data"]["biospecimenPerStudy"])
return result_df
def _download_study_quant(pdc_study_id):
"""Download PDC quantitative data for a particular study."""
proteome_query = '''
query {
quantDataMatrix(pdc_study_id: "''' + pdc_study_id + '''", data_type: "log2_ratio", acceptDUA: true)
}
'''
result_json = _query_pdc(proteome_query)
result_df = pd.DataFrame(result_json["data"]["quantDataMatrix"])
if result_df.shape[1] != 0:
result_df = result_df.set_index(result_df.columns[0]).transpose()
else:
raise PdcDownloadError(f"quantDataMatrix table returned for PDC study ID {pdc_study_id} was empty.")
return result_df
def _query_pdc(query):
"""Send a GraphQL query to the PDC and return the results."""
url = 'https://pdc.cancer.gov/graphql'
try:
response = requests.post(url, json={'query': query})
response.raise_for_status() # Raises a requests.HTTPError if the response code was unsuccessful
except requests.RequestException: # Parent class for all exceptions in the requests module
raise NoInternetError("Insufficient internet. Check your internet connection.") from None
return response.json()
def _check_ids_match(ids_map):
"""Check that the ids in the download function's STUDY_IDS_MAP match up."""
for cancer in ids_map.values():
for data in cancer.values():
pdc_study_id = data["pdc_study_id"]
study_submitter_id = data["study_submitter_id"]
query = '''
query {
study (pdc_study_id: "''' + pdc_study_id + '''" acceptDUA: true) {
pdc_study_id,
study_submitter_id
}
}
'''
idres = _query_pdc(query)
server_psi = idres["data"]["study"][0]["pdc_study_id"]
server_ssi = idres["data"]["study"][0]["study_submitter_id"]
assert server_psi == pdc_study_id
assert server_ssi == study_submitter_id
print(f"{server_psi} == {pdc_study_id}")
print(f"{server_ssi} == {study_submitter_id}")
print()
|
def download_pdc_id(pdc_id, _download_msg=True):
"""Download a PDC dataset by its PDC study id.
Returns:
pandas.DataFrame: The clinical table for the study id.
pandas.DataFrame: The quantitative table for the study id.
"""
if _download_msg:
clin_msg = f"Downloading clinical table for {pdc_id}..."
print(clin_msg, end="\r")
# Download the clinical table
clin = _download_study_clin(pdc_id).\
set_index("case_submitter_id").\
sort_index()
if _download_msg:
print(" " * len(clin_msg), end="\r")
bio_msg = f"Downloading biospecimenPerStudy table for {pdc_id}..."
print(bio_msg, end="\r")
# Get the biospecimenPerStudy table, which has both patient IDs and aliquot IDs
bio = _download_study_biospecimen(pdc_id).\
set_index("aliquot_submitter_id").\
sort_index()
if _download_msg:
print(" " * len(bio_msg), end="\r")
quant_msg = f"Downloading quantitative table for {pdc_id}..."
print(quant_msg, end="\r")
# Get the quantitative data table
quant = _download_study_quant(pdc_id)
if _download_msg:
print(" " * len(quant_msg), end="\r")
format_msg = f"Formatting tables for {pdc_id}..."
print(format_msg, end="\r")
# Join the patient IDs from the biospecimenPerStudy table into the quant table
quant = quant.\
assign(aliquot_submitter_id=quant.iloc[:, 0].str.split(":", n=1, expand=True)[1]).\
drop(columns=quant.columns[0]).\
set_index("aliquot_submitter_id").\
sort_index()
quant = bio.\
join(quant, how="inner").\
reset_index().\
set_index(["case_submitter_id", "aliquot_submitter_id"]).\
sort_index()
# Clear message
if _download_msg:
print(" " * len(format_msg), end="\r")
return clin, quant
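For reference, a minimal usage sketch of `download_pdc_id` (a sketch only, assuming this module is importable and internet access is available; "PDC000120" is the Prospective BRCA Proteome study id listed in `STUDY_IDS_MAP`):
```python
# Minimal usage sketch -- not part of the module itself.
clin, quant = download_pdc_id("PDC000120")

# clin is indexed by case_submitter_id; quant by (case_submitter_id, aliquot_submitter_id).
print(clin.shape, quant.shape)
quant.to_csv("PDC000120_proteome.tsv.gz", sep="\t")
```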
| 147 | 204 |
# Copyright 2018 Samuel Payne [email protected]
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pandas as pd
import requests
import shutil
import warnings
import cptac
from cptac.file_download import get_box_token
from cptac.exceptions import DatasetAlreadyInstalledWarning, InvalidParameterError, NoInternetError, PdcDownloadError
from .pancanbrca import SOURCES as BRCA_SOURCES
from .pancanccrcc import SOURCES as CCRCC_SOURCES
from .pancancoad import SOURCES as COAD_SOURCES
from .pancangbm import SOURCES as GBM_SOURCES
from .pancanhnscc import SOURCES as HNSCC_SOURCES
from .pancanlscc import SOURCES as LSCC_SOURCES
from .pancanluad import SOURCES as LUAD_SOURCES
from .pancanov import SOURCES as OV_SOURCES
from .pancanucec import SOURCES as UCEC_SOURCES
from .pancanpdac import SOURCES as PDAC_SOURCES
STUDY_IDS_MAP = {
"pdcbrca": {
"acetylome": "PDC000239", # Prospective Breast BI Acetylome
"phosphoproteome": "PDC000121", # Prospective BRCA Phosphoproteome S039-2
"proteome": "PDC000120", # Prospective BRCA Proteome S039-1
},
"pdcccrcc": {
"phosphoproteome": "PDC000128", # CPTAC CCRCC Discovery Study - Phosphoproteme S044-2
"proteome": "PDC000127", # CPTAC CCRCC Discovery Study - Proteome S044-1
},
"pdccoad": {
"phosphoproteome": "PDC000117", # Prospective COAD Phosphoproteome S037-3
"proteome": "PDC000116", # Prospective COAD Proteome S037-2
},
"pdcgbm": {
"acetylome": "PDC000245", # CPTAC GBM Discovery Study - Acetylome
"phosphoproteome": "PDC000205", # CPTAC GBM Discovery Study - Phosphoproteome
"proteome": "PDC000204", # CPTAC GBM Discovery Study - Proteome
},
"pdchnscc": {
"phosphoproteome": "PDC000222", # CPTAC HNSCC Discovery Study - Phosphoproteome
"proteome": "PDC000221", # CPTAC HNSCC Discovery Study - Proteome
},
"pdclscc": {
"acetylome": "PDC000233", # CPTAC LSCC Discovery Study - Acetylome
"phosphoproteome": "PDC000232", # CPTAC LSCC Discovery Study - Phosphoproteome
"proteome": "PDC000234", # CPTAC LSCC Discovery Study - Proteome
"ubiquitylome": "PDC000237", # CPTAC LSCC Discovery Study - Ubiquitylome
},
"pdcluad": {
"acetylome": "PDC000224", # CPTAC LUAD Discovery Study - Acetylome
"phosphoproteome": "PDC000149", # CPTAC LUAD Discovery Study - Phosphoproteome
"proteome": "PDC000153", # CPTAC LUAD Discovery Study - Proteome
},
"pdcov": {
"phosphoproteome": "PDC000119", # Prospective OV Phosphoproteome S038-3
"proteome": "PDC000118", # Prospective OV Proteome S038-2
},
"pdcpdac": {
"proteome": "PDC000270", # CPTAC PDAC Discovery Study - Proteome
"phosphoproteome": "PDC000271", # CPTAC PDAC Discovery Study - Phosphoproteome
},
"pdcucec": {
"acetylome": "PDC000226", # CPTAC UCEC Discovery Study - Acetylome
"phosphoproteome": "PDC000126", # UCEC Discovery - Phosphoproteome S043-2
"proteome": "PDC000125", # UCEC Discovery - Proteome S043-1
},
}
def download(dataset, version="latest", redownload=False):
dataset = dataset.lower()
if dataset.startswith("pdc"):
box_token = get_box_token()
if dataset != 'pdcbrca': # pdcbrca is the only dataset that doesn't need a mapping file for PDC
mapping = cptac.download(dataset, version=version, redownload=redownload, _box_auth=True, _box_token=box_token) # download helper file for mapping aliquots to patient IDs
omics = _pdc_download(dataset, version=version, redownload=redownload)
if omics and mapping:
return True
else:
return False
else: # pdcbrca only needs omics
omics = _pdc_download(dataset, version=version, redownload=redownload)
if omics:
return True
else:
return False
elif dataset.startswith("pancan") or dataset == "all":
box_token = get_box_token()
if dataset == "pancanbrca":
sources = BRCA_SOURCES
elif dataset == "pancanccrcc":
sources = CCRCC_SOURCES
elif dataset == "pancancoad":
sources = COAD_SOURCES
elif dataset == "pancangbm":
sources = GBM_SOURCES
elif dataset == "pancanhnscc":
sources = HNSCC_SOURCES
elif dataset == "pancanlscc":
sources = LSCC_SOURCES
elif dataset == "pancanluad":
sources = LUAD_SOURCES
elif dataset == "pancanov":
sources = OV_SOURCES
elif dataset == "pancanucec":
sources = UCEC_SOURCES
elif dataset == "pancanpdac":
sources = PDAC_SOURCES
elif dataset == "all":
sources = sorted(set(BRCA_SOURCES + CCRCC_SOURCES + COAD_SOURCES + GBM_SOURCES + HNSCC_SOURCES + LSCC_SOURCES + LUAD_SOURCES + OV_SOURCES + UCEC_SOURCES + PDAC_SOURCES))
else:
raise InvalidParameterError(f"{dataset} is not a valid dataset.")
overall_success = True
for source in sources:
if source.startswith("pdc"):
single_success = download(source, version=version, redownload=redownload)
else:
single_success = cptac.download(source, version=version, redownload=redownload, _box_auth=True, _box_token=box_token)
if not single_success:
overall_success = False
return overall_success
else:
return cptac.download(dataset, version=version, redownload=redownload, _box_auth=True)
def download_pdc_id(pdc_id, _download_msg=True):
"""Download a PDC dataset by its PDC study id.
Returns:
pandas.DataFrame: The clinical table for the study id.
pandas.DataFrame: The quantitative table for the study id.
"""
if _download_msg:
clin_msg = f"Downloading clinical table for {pdc_id}..."
print(clin_msg, end="\r")
# Download the clinical table
clin = _download_study_clin(pdc_id).\
set_index("case_submitter_id").\
sort_index()
if _download_msg:
print(" " * len(clin_msg), end="\r")
bio_msg = f"Downloading biospecimenPerStudy table for {pdc_id}..."
print(bio_msg, end="\r")
# Get the biospecimenPerStudy table, which has both patient IDs and aliquot IDs
bio = _download_study_biospecimen(pdc_id).\
set_index("aliquot_submitter_id").\
sort_index()
if _download_msg:
print(" " * len(bio_msg), end="\r")
quant_msg = f"Downloading quantitative table for {pdc_id}..."
print(quant_msg, end="\r")
# Get the quantitative data table
quant = _download_study_quant(pdc_id)
if _download_msg:
print(" " * len(quant_msg), end="\r")
format_msg = f"Formatting tables for {pdc_id}..."
print(format_msg, end="\r")
# Join the patient IDs from the biospecimenPerStudy table into the quant table
quant = quant.\
assign(aliquot_submitter_id=quant.iloc[:, 0].str.split(":", n=1, expand=True)[1]).\
drop(columns=quant.columns[0]).\
set_index("aliquot_submitter_id").\
sort_index()
quant = bio.\
join(quant, how="inner").\
reset_index().\
set_index(["case_submitter_id", "aliquot_submitter_id"]).\
sort_index()
# Clear message
if _download_msg:
print(" " * len(format_msg), end="\r")
return clin, quant
def list_pdc_datasets():
for dataset in STUDY_IDS_MAP.keys():
print(f"Pdc{dataset[3:].title()}:")
for data_type in STUDY_IDS_MAP[dataset].keys():
print(f"\t{data_type}: {STUDY_IDS_MAP[dataset][data_type]}")
# Helper functions
def _pdc_download(dataset, version, redownload):
"""Download data for the specified cancer type from the PDC."""
dataset = str.lower(dataset)
if dataset == "pdcall":
overall_result = True
for dataset in STUDY_IDS_MAP.keys():
if not _pdc_download(dataset, version, redownload):
overall_result = False
return overall_result
if not dataset.startswith("pdc"):
raise InvalidParameterError(f"pdc_download function can only be used for PDC datasets, which start with the prefix 'pdc'. You tried to download '{dataset}'.")
if dataset not in STUDY_IDS_MAP.keys():
raise InvalidParameterError(f"PDC dataset must be one of the following:\n{list(STUDY_IDS_MAP.keys())}\nYou passed '{dataset}'.")
dataset_ids = STUDY_IDS_MAP[dataset]
# Get the directory to where to store the data, and see if it exists
path_here = os.path.abspath(os.path.dirname(__file__))
cancer_dir = os.path.join(path_here, f"data_{dataset}")
if os.path.isdir(cancer_dir):
index_path = os.path.join(cancer_dir, "index.txt")
# Check that they also have the index
if not os.path.isfile(index_path):
redownload = True
else:
# The PDC doesn't have a versioning scheme for the tables they serve, so originally we just called it version 0.0 but later decided it would be better to call it 1.0. So, check if theirs is called 0.0; if so, replace it with 1.0.
with open(index_path, "r") as index_file:
first_line = index_file.readline()
if first_line.startswith("#0.0"):
redownload=True
if redownload:
shutil.rmtree(cancer_dir)
else:
return True
os.mkdir(cancer_dir)
data_dir = os.path.join(cancer_dir, f"{dataset}_v1.0")
os.mkdir(data_dir)
# We'll combine all the clinical tables in case there are differences
master_clin = pd.DataFrame()
for data_type in dataset_ids.keys():
# Print an update
download_msg = f"Downloading {dataset} {data_type} files..."
print(download_msg, end="\r")
# Get the clinical and quantitative tables for the study ID
clin, quant = download_pdc_id(dataset_ids[data_type], _download_msg=False)
# Print a new update
print(" " * len(download_msg), end="\r")
save_msg = f"Saving {dataset} {data_type} files..."
print(save_msg, end="\r")
# Append the clinical dataframe
master_clin = master_clin.append(clin)
# Save the quantitative table
quant.to_csv(os.path.join(data_dir, f"{data_type}.tsv.gz"), sep="\t")
# Erase update
print(" " * len(save_msg), end="\r")
# Print an update
save_msg = f"Saving {dataset} clinical file..."
print(save_msg, end="\r")
# Drop any duplicated rows in combined clinical table, then save it too
master_clin = master_clin.drop_duplicates(keep="first")
master_clin.to_csv(os.path.join(data_dir, "clinical.tsv.gz"), sep="\t")
# Write a dummy index with just version numbers
index_path = os.path.join(cancer_dir, "index.txt")
with open(index_path, "w") as index_file:
index_file.write("#1.0\n")
# Erase update
print(" " * len(save_msg), end="\r")
return True
def _download_study_clin(pdc_study_id):
"""Download PDC clinical data for a particular study."""
clinical_query = '''
query {
clinicalPerStudy(pdc_study_id: "''' + pdc_study_id + '''", acceptDUA: true) {
age_at_diagnosis, ajcc_clinical_m, ajcc_clinical_n, ajcc_clinical_stage, ajcc_clinical_t, ajcc_pathologic_m,
ajcc_pathologic_n, ajcc_pathologic_stage, ajcc_pathologic_t, ann_arbor_b_symptoms, ann_arbor_clinical_stage,
ann_arbor_extranodal_involvement, ann_arbor_pathologic_stage, best_overall_response, burkitt_lymphoma_clinical_variant,
case_id, case_submitter_id, cause_of_death, circumferential_resection_margin, classification_of_tumor, colon_polyps_history,
days_to_best_overall_response, days_to_birth, days_to_death, days_to_diagnosis, days_to_hiv_diagnosis, days_to_last_follow_up,
days_to_last_known_disease_status, days_to_new_event, days_to_recurrence, demographic_id, demographic_submitter_id,
diagnosis_id, diagnosis_submitter_id, disease_type, ethnicity, figo_stage, gender, hiv_positive, hpv_positive_type, hpv_status,
icd_10_code, iss_stage, last_known_disease_status, laterality, ldh_level_at_diagnosis, ldh_normal_range_upper,
lymphatic_invasion_present, lymph_nodes_positive, method_of_diagnosis, morphology, new_event_anatomic_site, new_event_type,
overall_survival, perineural_invasion_present, primary_diagnosis, primary_site, prior_malignancy, prior_treatment,
progression_free_survival, progression_free_survival_event, progression_or_recurrence, race, residual_disease,
site_of_resection_or_biopsy, status, synchronous_malignancy, tissue_or_organ_of_origin, tumor_cell_content, tumor_grade,
tumor_stage, vascular_invasion_present, vital_status, year_of_birth, year_of_death, year_of_diagnosis
}
}
'''
result_json = _query_pdc(clinical_query)
result_df = pd.\
DataFrame(result_json["data"]["clinicalPerStudy"])
return result_df
def _download_study_biospecimen(pdc_study_id):
"""Download PDC biospecimen data for a particular study."""
biospecimen_query = '''
query {
biospecimenPerStudy(pdc_study_id: "''' + pdc_study_id + '''", acceptDUA: true) {
aliquot_submitter_id
case_submitter_id
}
}
'''
result_json = _query_pdc(biospecimen_query)
result_df = pd.\
DataFrame(result_json["data"]["biospecimenPerStudy"])
return result_df
def _download_study_quant(pdc_study_id):
"""Download PDC quantitative data for a particular study."""
proteome_query = '''
query {
quantDataMatrix(pdc_study_id: "''' + pdc_study_id + '''", data_type: "log2_ratio", acceptDUA: true)
}
'''
result_json = _query_pdc(proteome_query)
result_df = pd.DataFrame(result_json["data"]["quantDataMatrix"])
if result_df.shape[1] != 0:
result_df = result_df.set_index(result_df.columns[0]).transpose()
else:
raise PdcDownloadError(f"quantDataMatrix table returned for PDC study ID {pdc_study_id} was empty.")
return result_df
def _query_pdc(query):
"""Send a GraphQL query to the PDC and return the results."""
url = 'https://pdc.cancer.gov/graphql'
try:
response = requests.post(url, json={'query': query})
response.raise_for_status() # Raises a requests.HTTPError if the response code was unsuccessful
except requests.RequestException: # Parent class for all exceptions in the requests module
raise NoInternetError("Insufficient internet. Check your internet connection.") from None
return response.json()
def _check_ids_match(ids_map):
"""Check that the ids in the download function's STUDY_IDS_MAP match up."""
for cancer in ids_map.values():
for data in cancer.values():
pdc_study_id = data["pdc_study_id"]
study_submitter_id = data["study_submitter_id"]
query = '''
query {
study (pdc_study_id: "''' + pdc_study_id + '''" acceptDUA: true) {
pdc_study_id,
study_submitter_id
}
}
'''
idres = _query_pdc(query)
server_psi = idres["data"]["study"][0]["pdc_study_id"]
server_ssi = idres["data"]["study"][0]["study_submitter_id"]
assert server_psi == pdc_study_id
assert server_ssi == study_submitter_id
print(f"{server_psi} == {pdc_study_id}")
print(f"{server_ssi} == {study_submitter_id}")
print()
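As an illustration of what `_query_pdc` sends over the wire, a standalone sketch of the same request built directly with `requests` (assumes internet access; the query mirrors `_download_study_biospecimen`, using a study id from `STUDY_IDS_MAP`):
```python
import requests

query = '''
query {
    biospecimenPerStudy(pdc_study_id: "PDC000120", acceptDUA: true) {
        aliquot_submitter_id
        case_submitter_id
    }
}
'''
response = requests.post("https://pdc.cancer.gov/graphql", json={"query": query})
response.raise_for_status()
print(response.json()["data"]["biospecimenPerStudy"][:3])
```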
|
run_on_kubernetes
|
Run dask function inside a pod using the given config.
Create a pod, using the local kubernetes configuration that starts a Dask Cluster
using dask-kubernetes and runs a function specified within the `config` dictionary.
Args:
config (dict):
Config dictionary.
namespace (str):
Kubernetes namespace where the pod will be created.
|
# -*- coding: utf-8 -*-
import argparse
import importlib
import json
import logging
import os
import re
import sys
from io import StringIO
import boto3
import tabulate
import yaml
from dask.distributed import Client
from dask_kubernetes import KubeCluster
from kubernetes.client import Configuration
from kubernetes.client.api import core_v1_api
from kubernetes.config import load_kube_config
RUN_TEMPLATE = """
/bin/bash <<'EOF'
{}
EOF
"""
CONFIG_TEMPLATE = """
cat > config.json << JSON
{}
JSON
"""
WORKER_COMM = '/usr/bin/prepare.sh dask-worker --no-dashboard --memory-limit 0 --death-timeout 0'
def _import_function(config):
function = config['function']
function = function.split('.')
function_name = function[-1]
package = '.'.join(function[:-1])
module = importlib.import_module(package)
return getattr(module, function_name)
def _get_extra_setup(setup_dict):
extra_packages = []
script = setup_dict.get('script')
if script:
extra_packages.append('exec {}'.format(script))
apt_packages = setup_dict.get('apt_packages')
if apt_packages:
extra_packages.append('apt-get install {}'.format(' '.join(apt_packages)))
pip_packages = setup_dict.get('pip_packages')
if pip_packages:
extra_packages.append('pip install {}'.format(' '.join(pip_packages)))
git_repository = setup_dict.get('git_repository')
if git_repository:
url = git_repository.get('url')
reference = git_repository.get('reference', 'master')
install = git_repository.get('install')
git_clone = 'git clone {} repo && cd repo'.format(url)
git_checkout = 'git checkout {}'.format(reference)
extra_packages.append('\n '.join([git_clone, git_checkout, install]))
if len(extra_packages) > 1:
return '\n '.join(extra_packages)
return extra_packages[0]
def _generate_cluster_spec(config, kubernetes=False):
extra_setup = ''
dask_cluster = config['dask_cluster']
metadata = {}
worker_config = dask_cluster.get('worker_config')
if worker_config.get('setup'):
extra_setup = _get_extra_setup(worker_config['setup'])
if kubernetes:
name = worker_config.get('image', 'daskdev/dask:latest')
name = '{}-'.format(re.sub(r'[\W_]', '-', name))
metadata['generateName'] = name
config_command = CONFIG_TEMPLATE.format(json.dumps(config))
run_command = 'python -u -m btb_benchmark.kubernetes config.json'
extra_setup = '\n'.join([extra_setup, config_command, run_command])
else:
run_command = WORKER_COMM
extra_setup = '\n'.join([extra_setup, run_command])
run_commands = RUN_TEMPLATE.format(extra_setup)
spec = {
'metadata': metadata,
'spec': {
'restartPolicy': 'Never',
'containers': [{
'args': ['-c', run_commands],
'command': ['tini', '-g', '--', '/bin/sh'],
'image': worker_config.get('image', 'daskdev/dask:latest'),
'name': 'dask-worker',
'resources': worker_config.get('resources', {})
}]
}
}
return spec
def _df_to_csv_str(df):
with StringIO() as sio:
df.to_csv(sio)
return sio.getvalue()
def _upload_to_s3(bucket, path, results, aws_key=None, aws_secret=None):
client = boto3.client('s3', aws_access_key_id=aws_key, aws_secret_access_key=aws_secret)
client.put_object(Bucket=bucket, Key=path, Body=_df_to_csv_str(results))
def run_dask_function(config):
"""Start a Dask Cluster using dask-kubernetes and run a function.
Talks to kubernetes to create `n` amount of new `pods` with a dask worker inside of each
forming a `dask` cluster. Then, a function specified from `config` is being imported and
run with the given arguments. The tasks created by this `function` are being run on the
`dask` cluster for distributed computation.
The config dict must contain the following sections:
* run
* dask_cluster
* output
Args:
config (dict):
Config dictionary.
"""
output_conf = config.get('output')
if output_conf:
path = output_conf.get('path')
if not path:
raise ValueError('An output path must be provided when providing `output`.')
cluster_spec = _generate_cluster_spec(config, kubernetes=False)
cluster = KubeCluster.from_dict(cluster_spec)
workers = config['dask_cluster'].get('workers')
if not workers:
cluster.adapt()
elif isinstance(workers, int):
cluster.scale(workers)
else:
cluster.adapt(**workers)
client = Client(cluster)
client.get_versions(check=True)
try:
run = _import_function(config['run'])
kwargs = config['run']['args']
results = run(**kwargs)
finally:
client.close()
cluster.close()
if output_conf:
bucket = output_conf.get('bucket')
try:
if bucket:
aws_key = output_conf.get('key')
aws_secret = output_conf.get('secret_key')
_upload_to_s3(bucket, path, results, aws_key, aws_secret)
else:
os.makedirs(os.path.dirname(path), exist_ok=True)
results.to_csv(path)
except Exception:
print('Error storing results. Falling back to console dump.')
print(_df_to_csv_str(results))
else:
return results
# MASKED: run_on_kubernetes function (lines 197-218)
def _get_parser():
parser = argparse.ArgumentParser(description='Run on Kubernetes Command Line Interface')
parser.add_argument('config', help='Path to the JSON config file.')
parser.add_argument('-v', '--verbose', action='count', default=0,
help='Be verbose. Use -vv for increased verbosity.')
parser.add_argument('--create-pod', action='store_true',
help='Create a master pod and run the given `config` from there.')
parser.add_argument('-n', '--namespace', default='default',
help='Namespace where the pod will be created.')
return parser
def main():
# Parse args
parser = _get_parser()
if len(sys.argv) < 2:
parser.print_help()
sys.exit(0)
args = parser.parse_args()
# Logger setup
log_level = (3 - args.verbose) * 10
fmt = '%(asctime)s - %(process)d - %(levelname)s - %(name)s - %(module)s - %(message)s'
logging.basicConfig(level=log_level, format=fmt)
with open(args.config) as config_file:
if args.config.endswith('yaml') or args.config.endswith('yml'):
config = yaml.safe_load(config_file)
else:
config = json.load(config_file)
if args.create_pod:
run_on_kubernetes(config, args.namespace)
else:
results = run_dask_function(config)
if results is not None:
print(tabulate.tabulate(
results,
tablefmt='github',
headers=results.columns
))
if __name__ == '__main__':
main()
|
def run_on_kubernetes(config, namespace='default'):
"""Run dask function inside a pod using the given config.
Create a pod, using the local kubernetes configuration that starts a Dask Cluster
using dask-kubernetes and runs a function specified within the `config` dictionary.
Args:
config (dict):
Config dictionary.
namespace (str):
Kubernetes namespace where the pod will be created.
"""
# read local config
load_kube_config()
c = Configuration()
Configuration.set_default(c)
# create client and create pod on default namespace
core_v1 = core_v1_api.CoreV1Api()
spec = _generate_cluster_spec(config, kubernetes=True)
core_v1.create_namespaced_pod(body=spec, namespace=namespace)
print('Pod created.')
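A minimal sketch of a config dict accepted by `run_dask_function` and `run_on_kubernetes`; the function path, its arguments, and the package names are hypothetical placeholders, while the keys (`run`, `dask_cluster`, `output`) come from the code above:
```python
config = {
    'run': {
        'function': 'my_package.benchmarks.run_benchmark',  # hypothetical import path
        'args': {'iterations': 10},
    },
    'dask_cluster': {
        'workers': 4,  # int -> cluster.scale(4); dict -> cluster.adapt(**workers); omit -> adapt()
        'worker_config': {
            'image': 'daskdev/dask:latest',
            'resources': {'limits': {'memory': '4G', 'cpu': '2'}},
            'setup': {'pip_packages': ['my-package']},  # hypothetical package
        },
    },
    'output': {'path': 'results/output.csv'},
}

run_on_kubernetes(config, namespace='default')   # run the config from a master pod
# or: results = run_dask_function(config)        # run directly from the local machine
```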
| 197 | 218 |
# -*- coding: utf-8 -*-
import argparse
import importlib
import json
import logging
import os
import re
import sys
from io import StringIO
import boto3
import tabulate
import yaml
from dask.distributed import Client
from dask_kubernetes import KubeCluster
from kubernetes.client import Configuration
from kubernetes.client.api import core_v1_api
from kubernetes.config import load_kube_config
RUN_TEMPLATE = """
/bin/bash <<'EOF'
{}
EOF
"""
CONFIG_TEMPLATE = """
cat > config.json << JSON
{}
JSON
"""
WORKER_COMM = '/usr/bin/prepare.sh dask-worker --no-dashboard --memory-limit 0 --death-timeout 0'
def _import_function(config):
function = config['function']
function = function.split('.')
function_name = function[-1]
package = '.'.join(function[:-1])
module = importlib.import_module(package)
return getattr(module, function_name)
def _get_extra_setup(setup_dict):
extra_packages = []
script = setup_dict.get('script')
if script:
extra_packages.append('exec {}'.format(script))
apt_packages = setup_dict.get('apt_packages')
if apt_packages:
extra_packages.append('apt-get install {}'.format(' '.join(apt_packages)))
pip_packages = setup_dict.get('pip_packages')
if pip_packages:
extra_packages.append('pip install {}'.format(' '.join(pip_packages)))
git_repository = setup_dict.get('git_repository')
if git_repository:
url = git_repository.get('url')
reference = git_repository.get('reference', 'master')
install = git_repository.get('install')
git_clone = 'git clone {} repo && cd repo'.format(url)
git_checkout = 'git checkout {}'.format(reference)
extra_packages.append('\n '.join([git_clone, git_checkout, install]))
if len(extra_packages) > 1:
return '\n '.join(extra_packages)
return extra_packages[0]
def _generate_cluster_spec(config, kubernetes=False):
extra_setup = ''
dask_cluster = config['dask_cluster']
metadata = {}
worker_config = dask_cluster.get('worker_config')
if worker_config.get('setup'):
extra_setup = _get_extra_setup(worker_config['setup'])
if kubernetes:
name = worker_config.get('image', 'daskdev/dask:latest')
name = '{}-'.format(re.sub(r'[\W_]', '-', name))
metadata['generateName'] = name
config_command = CONFIG_TEMPLATE.format(json.dumps(config))
run_command = 'python -u -m btb_benchmark.kubernetes config.json'
extra_setup = '\n'.join([extra_setup, config_command, run_command])
else:
run_command = WORKER_COMM
extra_setup = '\n'.join([extra_setup, run_command])
run_commands = RUN_TEMPLATE.format(extra_setup)
spec = {
'metadata': metadata,
'spec': {
'restartPolicy': 'Never',
'containers': [{
'args': ['-c', run_commands],
'command': ['tini', '-g', '--', '/bin/sh'],
'image': worker_config.get('image', 'daskdev/dask:latest'),
'name': 'dask-worker',
'resources': worker_config.get('resources', {})
}]
}
}
return spec
def _df_to_csv_str(df):
with StringIO() as sio:
df.to_csv(sio)
return sio.getvalue()
def _upload_to_s3(bucket, path, results, aws_key=None, aws_secret=None):
client = boto3.client('s3', aws_access_key_id=aws_key, aws_secret_access_key=aws_secret)
client.put_object(Bucket=bucket, Key=path, Body=_df_to_csv_str(results))
def run_dask_function(config):
"""Start a Dask Cluster using dask-kubernetes and run a function.
Talks to kubernetes to create `n` amount of new `pods` with a dask worker inside of each
forming a `dask` cluster. Then, a function specified from `config` is being imported and
run with the given arguments. The tasks created by this `function` are being run on the
`dask` cluster for distributed computation.
The config dict must contain the following sections:
* run
* dask_cluster
* output
Args:
config (dict):
Config dictionary.
"""
output_conf = config.get('output')
if output_conf:
path = output_conf.get('path')
if not path:
raise ValueError('An output path must be provided when providing `output`.')
cluster_spec = _generate_cluster_spec(config, kubernetes=False)
cluster = KubeCluster.from_dict(cluster_spec)
workers = config['dask_cluster'].get('workers')
if not workers:
cluster.adapt()
elif isinstance(workers, int):
cluster.scale(workers)
else:
cluster.adapt(**workers)
client = Client(cluster)
client.get_versions(check=True)
try:
run = _import_function(config['run'])
kwargs = config['run']['args']
results = run(**kwargs)
finally:
client.close()
cluster.close()
if output_conf:
bucket = output_conf.get('bucket')
try:
if bucket:
aws_key = output_conf.get('key')
aws_secret = output_conf.get('secret_key')
_upload_to_s3(bucket, path, results, aws_key, aws_secret)
else:
os.makedirs(os.path.dirname(path), exist_ok=True)
results.to_csv(path)
except Exception:
print('Error storing results. Falling back to console dump.')
print(_df_to_csv_str(results))
else:
return results
def run_on_kubernetes(config, namespace='default'):
"""Run dask function inside a pod using the given config.
Create a pod, using the local kubernetes configuration that starts a Dask Cluster
using dask-kubernetes and runs a function specified within the `config` dictionary.
Args:
config (dict):
Config dictionary.
namespace (str):
Kubernetes namespace were the pod will be created.
"""
# read local config
load_kube_config()
c = Configuration()
Configuration.set_default(c)
# create client and create pod on default namespace
core_v1 = core_v1_api.CoreV1Api()
spec = _generate_cluster_spec(config, kubernetes=True)
core_v1.create_namespaced_pod(body=spec, namespace=namespace)
print('Pod created.')
def _get_parser():
parser = argparse.ArgumentParser(description='Run on Kubernetes Command Line Interface')
parser.add_argument('config', help='Path to the JSON config file.')
parser.add_argument('-v', '--verbose', action='count', default=0,
help='Be verbose. Use -vv for increased verbosity.')
parser.add_argument('--create-pod', action='store_true',
help='Create a master pod and run the given `config` from there.')
parser.add_argument('-n', '--namespace', default='default',
help='Namespace were the pod will be created.')
return parser
def main():
# Parse args
parser = _get_parser()
if len(sys.argv) < 2:
parser.print_help()
sys.exit(0)
args = parser.parse_args()
# Logger setup
log_level = (3 - args.verbose) * 10
fmt = '%(asctime)s - %(process)d - %(levelname)s - %(name)s - %(module)s - %(message)s'
logging.basicConfig(level=log_level, format=fmt)
with open(args.config) as config_file:
if args.config.endswith('yaml') or args.config.endswith('yml'):
config = yaml.safe_load(config_file)
else:
config = json.load(config_file)
if args.create_pod:
run_on_kubernetes(config, args.namespace)
else:
results = run_dask_function(config)
if results is not None:
print(tabulate.tabulate(
results,
tablefmt='github',
headers=results.columns
))
if __name__ == '__main__':
main()
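To show how the `setup` section becomes shell commands inside the worker pod, a small sketch of `_get_extra_setup`; the repository URL and package names are hypothetical, and the printed output is approximate:
```python
setup = {
    'apt_packages': ['git'],
    'pip_packages': ['dask-kubernetes'],
    'git_repository': {
        'url': 'https://github.com/example/repo.git',   # hypothetical repository
        'reference': 'main',
        'install': 'pip install -e .',
    },
}
print(_get_extra_setup(setup))
# Prints roughly:
#   apt-get install git
#   pip install dask-kubernetes
#   git clone https://github.com/example/repo.git repo && cd repo
#   git checkout main
#   pip install -e .
```
These lines are then wrapped in RUN_TEMPLATE and passed to the container as its startup script.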
|
get_event_categories
|
## Example Usage
List the event categories of all the RDS resources.
```python
import pulumi
import pulumi_aws as aws
example_event_categories = aws.rds.get_event_categories()
pulumi.export("example", example_event_categories.event_categories)
```
List the event categories specific to the RDS resource `db-snapshot`.
```python
import pulumi
import pulumi_aws as aws
example_event_categories = aws.rds.get_event_categories(source_type="db-snapshot")
pulumi.export("example", example_event_categories.event_categories)
```
:param str source_type: The type of source that will be generating the events. Valid options are db-instance, db-security-group, db-parameter-group, db-snapshot, db-cluster or db-cluster-snapshot.
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
__all__ = [
'GetEventCategoriesResult',
'AwaitableGetEventCategoriesResult',
'get_event_categories',
]
@pulumi.output_type
class GetEventCategoriesResult:
"""
A collection of values returned by getEventCategories.
"""
def __init__(__self__, event_categories=None, id=None, source_type=None):
if event_categories and not isinstance(event_categories, list):
raise TypeError("Expected argument 'event_categories' to be a list")
pulumi.set(__self__, "event_categories", event_categories)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if source_type and not isinstance(source_type, str):
raise TypeError("Expected argument 'source_type' to be a str")
pulumi.set(__self__, "source_type", source_type)
@property
@pulumi.getter(name="eventCategories")
def event_categories(self) -> List[str]:
"""
A list of the event categories.
"""
return pulumi.get(self, "event_categories")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="sourceType")
def source_type(self) -> Optional[str]:
return pulumi.get(self, "source_type")
class AwaitableGetEventCategoriesResult(GetEventCategoriesResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetEventCategoriesResult(
event_categories=self.event_categories,
id=self.id,
source_type=self.source_type)
# MASKED: get_event_categories function (lines 66-105)
|
def get_event_categories(source_type: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetEventCategoriesResult:
"""
## Example Usage
List the event categories of all the RDS resources.
```python
import pulumi
import pulumi_aws as aws
example_event_categories = aws.rds.get_event_categories()
pulumi.export("example", example_event_categories.event_categories)
```
List the event categories specific to the RDS resource `db-snapshot`.
```python
import pulumi
import pulumi_aws as aws
example_event_categories = aws.rds.get_event_categories(source_type="db-snapshot")
pulumi.export("example", example_event_categories.event_categories)
```
:param str source_type: The type of source that will be generating the events. Valid options are db-instance, db-security-group, db-parameter-group, db-snapshot, db-cluster or db-cluster-snapshot.
"""
__args__ = dict()
__args__['sourceType'] = source_type
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:rds/getEventCategories:getEventCategories', __args__, opts=opts, typ=GetEventCategoriesResult).value
return AwaitableGetEventCategoriesResult(
event_categories=__ret__.event_categories,
id=__ret__.id,
source_type=__ret__.source_type)
| 66 | 105 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
__all__ = [
'GetEventCategoriesResult',
'AwaitableGetEventCategoriesResult',
'get_event_categories',
]
@pulumi.output_type
class GetEventCategoriesResult:
"""
A collection of values returned by getEventCategories.
"""
def __init__(__self__, event_categories=None, id=None, source_type=None):
if event_categories and not isinstance(event_categories, list):
raise TypeError("Expected argument 'event_categories' to be a list")
pulumi.set(__self__, "event_categories", event_categories)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if source_type and not isinstance(source_type, str):
raise TypeError("Expected argument 'source_type' to be a str")
pulumi.set(__self__, "source_type", source_type)
@property
@pulumi.getter(name="eventCategories")
def event_categories(self) -> List[str]:
"""
A list of the event categories.
"""
return pulumi.get(self, "event_categories")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="sourceType")
def source_type(self) -> Optional[str]:
return pulumi.get(self, "source_type")
class AwaitableGetEventCategoriesResult(GetEventCategoriesResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetEventCategoriesResult(
event_categories=self.event_categories,
id=self.id,
source_type=self.source_type)
def get_event_categories(source_type: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetEventCategoriesResult:
"""
## Example Usage
List the event categories of all the RDS resources.
```python
import pulumi
import pulumi_aws as aws
example_event_categories = aws.rds.get_event_categories()
pulumi.export("example", example_event_categories.event_categories)
```
List the event categories specific to the RDS resource `db-snapshot`.
```python
import pulumi
import pulumi_aws as aws
example_event_categories = aws.rds.get_event_categories(source_type="db-snapshot")
pulumi.export("example", example_event_categories.event_categories)
```
:param str source_type: The type of source that will be generating the events. Valid options are db-instance, db-security-group, db-parameter-group, db-snapshot, db-cluster or db-cluster-snapshot.
"""
__args__ = dict()
__args__['sourceType'] = source_type
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:rds/getEventCategories:getEventCategories', __args__, opts=opts, typ=GetEventCategoriesResult).value
return AwaitableGetEventCategoriesResult(
event_categories=__ret__.event_categories,
id=__ret__.id,
source_type=__ret__.source_type)
|
_parse
|
Parse the test output.
See also https://github.com/axboe/fio/blob/master/HOWTO
|
from collections import defaultdict
import requests
from logger import logger
from perfrunner.helpers.misc import pretty_dict
from perfrunner.helpers.remote import RemoteHelper
from perfrunner.tests import PerfTest
class FIOTest(PerfTest):
TRACKER = 'fio.sc.couchbase.com'
TEMPLATE = {
'group': '{}, random mixed reads and writes, IOPS',
'metric': None,
'value': None,
}
def __init__(self, cluster_spec, test_config, verbose):
self.cluster_spec = cluster_spec
self.test_config = test_config
self.remote = RemoteHelper(cluster_spec, verbose)
def __exit__(self, *args, **kwargs):
pass
# MASKED: _parse function (lines 29-40)
def _post(self, data):
data = pretty_dict(data)
logger.info('Posting: {}'.format(data))
requests.post('http://{}/api/v1/benchmarks'.format(self.TRACKER),
data=data)
def _report_kpi(self, stats):
for host, iops in stats.items():
data = self.TEMPLATE.copy()
data['group'] = data['group'].format(self.cluster_spec.name.title())
data['metric'] = host
data['value'] = iops
self._post(data)
def run(self):
stats = self.remote.fio(self.test_config.fio['config'])
self._report_kpi(self._parse(stats))
|
@staticmethod
def _parse(results):
"""Parse the test output.
See also https://github.com/axboe/fio/blob/master/HOWTO
"""
stats = defaultdict(int)
for host, output in results.items():
for job in output.split():
stats[host] += int(job.split(';')[7]) # reads
stats[host] += int(job.split(';')[48]) # writes
return stats
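A toy illustration of the semicolon-separated terse output that `_parse` consumes (values are made up; per fio's terse output format, field 7 carries read IOPS and field 48 write IOPS, matching the comments above). Assumes `FIOTest` is importable from this module:
```python
# Build a toy terse line: 50 zero-valued fields, with IOPS planted at indexes 7 and 48.
fields = ['0'] * 50
fields[7] = '1200'     # read IOPS (illustrative)
fields[48] = '800'     # write IOPS (illustrative)
results = {'worker-1': ';'.join(fields)}       # hypothetical host name

print(FIOTest._parse(results))                 # defaultdict(<class 'int'>, {'worker-1': 2000})
```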
| 29 | 40 |
from collections import defaultdict
import requests
from logger import logger
from perfrunner.helpers.misc import pretty_dict
from perfrunner.helpers.remote import RemoteHelper
from perfrunner.tests import PerfTest
class FIOTest(PerfTest):
TRACKER = 'fio.sc.couchbase.com'
TEMPLATE = {
'group': '{}, random mixed reads and writes, IOPS',
'metric': None,
'value': None,
}
def __init__(self, cluster_spec, test_config, verbose):
self.cluster_spec = cluster_spec
self.test_config = test_config
self.remote = RemoteHelper(cluster_spec, verbose)
def __exit__(self, *args, **kwargs):
pass
@staticmethod
def _parse(results):
"""Parse the test output.
See also https://github.com/axboe/fio/blob/master/HOWTO
"""
stats = defaultdict(int)
for host, output in results.items():
for job in output.split():
stats[host] += int(job.split(';')[7]) # reads
stats[host] += int(job.split(';')[48]) # writes
return stats
def _post(self, data):
data = pretty_dict(data)
logger.info('Posting: {}'.format(data))
requests.post('http://{}/api/v1/benchmarks'.format(self.TRACKER),
data=data)
def _report_kpi(self, stats):
for host, iops in stats.items():
data = self.TEMPLATE.copy()
data['group'] = data['group'].format(self.cluster_spec.name.title())
data['metric'] = host
data['value'] = iops
self._post(data)
def run(self):
stats = self.remote.fio(self.test_config.fio['config'])
self._report_kpi(self._parse(stats))
|
predict
|
Predict the non-zero elements of the coo sparse matrix X according to the fitted model.
Parameters
----------
X {array-like, sparse coo matrix} shape (m, n)
Data matrix in coo format. Values are ignored.
Returns
-------
{array-like, sparse coo matrix} shape (m, n)
Predicted values.
|
# -*- encoding: utf-8 -*-
# pylint: disable=fixme, line-too-long
"""
Matrix factorization solver.
:copyright: 2017-2019 H2O.ai, Inc.
:license: Apache License Version 2.0 (see LICENSE for details)
"""
import numpy as np
import scipy
import scipy.sparse
def _get_sparse_matrixes(X):
'''Create csc, csr and coo sparse matrices from a matrix given in any one of those formats.
Arguments:
X {array-like, csc, csr or coo sparse matrix}
Returns:
csc, csr, coo
'''
X_coo = X_csc = X_csr = None
if scipy.sparse.isspmatrix_coo(X):
X_coo = X
X_csr = X_coo.tocsr(True)
X_csc = X_coo.tocsc(True)
elif scipy.sparse.isspmatrix_csr(X):
X_csr = X
X_csc = X_csr.tocsc(True)
X_coo = X_csr.tocoo(True)
elif scipy.sparse.isspmatrix_csc(X):
X_csc = X
X_csr = X_csc.tocsr(True)
X_coo = X_csc.tocoo(True)
else:
assert False, "only coo, csc and csr sparse matrixes are supported"
return X_csc, X_csr, X_coo
class FactorizationH2O(object):
'''Matrix Factorization on GPU with Alternating Least Square (ALS) algorithm.
Factors a sparse rating matrix X (m by n, with N_z non-zero elements)
into a m-by-f and a f-by-n matrices.
Parameters
----------
f int
decomposition size
lambda_ float
lambda regularization
max_iter int, default: 100
number of training iterations
double_precision bool, default: False
use double precision, not yet supported
thetaT {array-like} shape (n, f), default: None
initial theta matrix
XT {array-like} shape (m, f), default: None
initial XT matrix
random_state int, default: 1234
Attributes
----------
XT {array-like} shape (m, f)
XT matrix contains user's features
thetaT {array-like} shape (n, f)
transposed theta matrix, item's features
Warnings
--------
Matrices ``XT`` and ``thetaT`` may contain NaN elements. This is because some datasets
contain users or items with no ratings in the training set, so the corresponding
systems of linear equations have NaN solutions. Such elements can easily be removed with
NumPy functions like numpy.nan_to_num, but their presence may be useful for troubleshooting purposes.
'''
def __init__(self, f, lambda_, max_iter=100, double_precision=False, thetaT=None, XT=None, random_state=1234):
assert not double_precision, 'double precision is not yet supported'
assert f % 10 == 0, 'f has to be a multiple of 10'
self.f = f
self.lambda_ = lambda_
self.double_precision = double_precision
self.dtype = np.float64 if self.double_precision else np.float32
self.thetaT = thetaT
self.XT = XT
self.max_iter = max_iter
self.random_state = random_state
def _load_lib(self):
from ..libs.lib_utils import GPUlib
gpu_lib = GPUlib().get(1)
return gpu_lib
def fit(self, X, y=None, X_test=None, X_BATCHES=1, THETA_BATCHES=1, early_stopping_rounds=None, verbose=False, scores=None):
#pylint: disable=unused-argument
'''Learn model from rating matrix X.
Parameters
----------
X {array-like, sparse matrix}, shape (m, n)
Data matrix to be decomposed.
y None
Ignored
X_test {array-like, coo sparse matrix}, shape (m, n)
Data matrix for cross validation.
X_BATCHES int, default: 1
Batches to split XT, increase this parameter in case out of memory error.
THETA_BATCHES int, default: 1
Batches to split theta, increase this parameter in case out of memory error.
early_stopping_rounds int, default: None
Activates early stopping. Cross validation error needs to decrease
at least every <early_stopping_rounds> round(s) to continue training. Requires <X_test>.
Returns the model from the last iteration (not the best one). If early stopping occurs,
the model will have three additional fields: best_cv_score, best_train_score and best_iteration.
verbose bool, default: False
Prints training and validation score(if applicable) on each iteration.
scores {list}
List of tuples with train, cv score for every iteration.
Returns
-------
self : returns an instance of self.
'''
csc_X, csr_X, coo_X = _get_sparse_matrixes(X)
if early_stopping_rounds is not None:
assert X_test is not None, 'X_test is mandatory with early stopping'
coo_X_test = None
if X_test is not None:
assert scipy.sparse.isspmatrix_coo(
X_test), 'X_test must be a coo sparse scipy matrix'
assert X.shape == X_test.shape
assert X_test.dtype == self.dtype
assert X.dtype == self.dtype
coo_X_test = X_test
lib = self._load_lib()
if self.double_precision:
make_data = lib.make_factorization_data_double
run_step = lib.run_factorization_step_double
factorization_score = lib.factorization_score_double
copy_fecatorization_result = lib.copy_fecatorization_result_double
free_data = lib.free_data_double
else:
make_data = lib.make_factorization_data_float
run_step = lib.run_factorization_step_float
factorization_score = lib.factorization_score_float
copy_fecatorization_result = lib.copy_fecatorization_result_float
free_data = lib.free_data_float
m = coo_X.shape[0]
n = coo_X.shape[1]
nnz = csc_X.nnz
if coo_X_test is None:
nnz_test = 0
else:
nnz_test = coo_X_test.nnz
rs = np.random.RandomState(self.random_state)
if self.thetaT is None:
self.thetaT = rs.rand(n, self.f).astype(self.dtype)
else:
assert self.thetaT.dtype == self.dtype
if self.XT is None:
self.XT = rs.rand(m, self.f).astype(self.dtype)
else:
assert self.XT.dtype == self.dtype
csrRowIndexDevicePtr = None
csrColIndexDevicePtr = None
csrValDevicePtr = None
cscRowIndexDevicePtr = None
cscColIndexDevicePtr = None
cscValDevicePtr = None
cooRowIndexDevicePtr = None
cooColIndexDevicePtr = None
cooValDevicePtr = None
thetaTDevice = None
XTDevice = None
cooRowIndexTestDevicePtr = None
cooColIndexTestDevicePtr = None
cooValTestDevicePtr = None
status, csrRowIndexDevicePtr, csrColIndexDevicePtr, csrValDevicePtr, \
cscRowIndexDevicePtr, cscColIndexDevicePtr, cscValDevicePtr, \
cooRowIndexDevicePtr, cooColIndexDevicePtr, cooValDevicePtr, \
thetaTDevice, XTDevice, cooRowIndexTestDevicePtr, \
cooColIndexTestDevicePtr, cooValTestDevicePtr = make_data( # pylint: disable=W0212
m, n, self.f, nnz, nnz_test, csr_X.indptr, csr_X.indices, csr_X.data,
csc_X.indices, csc_X.indptr, csc_X.data,
coo_X.row, coo_X.col, coo_X.data,
self.thetaT, self.XT, coo_X_test.row if coo_X_test is not None else None,
coo_X_test.col if coo_X_test is not None else None, coo_X_test.data if coo_X_test is not None else None,
csrRowIndexDevicePtr, csrColIndexDevicePtr, csrValDevicePtr, cscRowIndexDevicePtr, cscColIndexDevicePtr, cscValDevicePtr,
cooRowIndexDevicePtr, cooColIndexDevicePtr, cooValDevicePtr,
thetaTDevice, XTDevice, cooRowIndexTestDevicePtr,
cooColIndexTestDevicePtr, cooValTestDevicePtr)
assert status == 0, 'Failure uploading the data'
self.best_train_score = np.inf
self.best_cv_score = np.inf
self.best_iteration = -1
cv_score = train_score = np.inf
for i in range(self.max_iter):
status = run_step(m,
n,
self.f,
nnz,
self.lambda_,
csrRowIndexDevicePtr,
csrColIndexDevicePtr,
csrValDevicePtr,
cscRowIndexDevicePtr,
cscColIndexDevicePtr,
cscValDevicePtr,
thetaTDevice,
XTDevice,
X_BATCHES,
THETA_BATCHES)
if verbose or scores is not None:
result = factorization_score(m,
n,
self.f,
nnz,
self.lambda_,
thetaTDevice,
XTDevice,
cooRowIndexDevicePtr,
cooColIndexDevicePtr,
cooValDevicePtr)
train_score = result[0]
if X_test is not None and (verbose or early_stopping_rounds is not None or scores is not None):
result = factorization_score(m,
n,
self.f,
nnz_test,
self.lambda_,
thetaTDevice,
XTDevice,
cooRowIndexTestDevicePtr,
cooColIndexTestDevicePtr,
cooValTestDevicePtr)
cv_score = result[0]
if verbose:
print("iteration {0} train: {1} cv: {2}".format(
i, train_score, cv_score))
if scores is not None:
scores.append((train_score, cv_score))
if early_stopping_rounds is not None:
if self.best_cv_score > cv_score:
self.best_cv_score = cv_score
self.best_train_score = train_score
self.best_iteration = i
if (i - self.best_iteration) > early_stopping_rounds:
if verbose:
print('best iteration:{0} train: {1} cv: {2}'.format(
self.best_iteration, self.best_train_score, self.best_cv_score))
break
lib.free_data_int(csrRowIndexDevicePtr)
lib.free_data_int(csrColIndexDevicePtr)
free_data(csrValDevicePtr)
lib.free_data_int(cscRowIndexDevicePtr)
lib.free_data_int(cscColIndexDevicePtr)
free_data(cscValDevicePtr)
lib.free_data_int(cooRowIndexDevicePtr)
lib.free_data_int(cooColIndexDevicePtr)
free_data(cooValDevicePtr)
lib.free_data_int(cooRowIndexTestDevicePtr)
lib.free_data_int(cooColIndexTestDevicePtr)
free_data(cooValTestDevicePtr)
copy_fecatorization_result(self.XT, XTDevice, m * self.f)
copy_fecatorization_result(self.thetaT, thetaTDevice, n * self.f)
free_data(thetaTDevice)
free_data(XTDevice)
return self
# MASKED: predict function (lines 294-316)
|
def predict(self, X):
'''Predict the non-zero elements of the coo sparse matrix X according to the fitted model.
Parameters
----------
X {array-like, sparse coo matrix} shape (m, n)
Data matrix in coo format. Values are ignored.
Returns
-------
{array-like, sparse coo matrix} shape (m, n)
Predicted values.
'''
assert self.XT is not None and self.thetaT is not None, 'predict was invoked on an unfitted model'
assert scipy.sparse.isspmatrix_coo(
X), 'convert X to coo sparse matrix'
assert X.dtype == self.dtype
a = np.take(self.XT, X.row, axis=0)
b = np.take(self.thetaT, X.col, axis=0)
val = np.sum(a * b, axis=1)
return scipy.sparse.coo_matrix((val, (X.row, X.col)), shape=X.shape)
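A minimal end-to-end sketch of the fit/predict API, assuming the class is importable and a GPU build of the library is available; the random ratings are purely illustrative:
```python
import numpy as np
import scipy.sparse

# Build a small random ratings matrix in coo format (illustrative data only).
m, n, nnz = 1000, 500, 5000
rs = np.random.RandomState(0)
row = rs.randint(0, m, size=nnz)
col = rs.randint(0, n, size=nnz)
val = rs.rand(nnz).astype(np.float32)          # dtype must match the model (float32)
X = scipy.sparse.coo_matrix((val, (row, col)), shape=(m, n))

model = FactorizationH2O(f=20, lambda_=0.01, max_iter=20)   # f must be a multiple of 10
model.fit(X, verbose=True)                     # requires the GPU backend
X_hat = model.predict(X)                       # coo matrix with predictions at X's non-zero positions
```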
| 294 | 316 |
# -*- encoding: utf-8 -*-
# pylint: disable=fixme, line-too-long
"""
Matrix factorization solver.
:copyright: 2017-2019 H2O.ai, Inc.
:license: Apache License Version 2.0 (see LICENSE for details)
"""
import numpy as np
import scipy
import scipy.sparse
def _get_sparse_matrixes(X):
'''Create csc, csr and coo sparse matrices from a matrix given in any one of those formats.
Arguments:
X {array-like, csc, csr or coo sparse matrix}
Returns:
csc, csr, coo
'''
X_coo = X_csc = X_csr = None
if scipy.sparse.isspmatrix_coo(X):
X_coo = X
X_csr = X_coo.tocsr(True)
X_csc = X_coo.tocsc(True)
elif scipy.sparse.isspmatrix_csr(X):
X_csr = X
X_csc = X_csr.tocsc(True)
X_coo = X_csr.tocoo(True)
elif scipy.sparse.isspmatrix_csc(X):
X_csc = X
X_csr = X_csc.tocsr(True)
X_coo = X_csc.tocoo(True)
else:
assert False, "only coo, csc and csr sparse matrixes are supported"
return X_csc, X_csr, X_coo
class FactorizationH2O(object):
'''Matrix Factorization on GPU with Alternating Least Square (ALS) algorithm.
Factors a sparse rating matrix X (m by n, with N_z non-zero elements)
into a m-by-f and a f-by-n matrices.
Parameters
----------
f int
decomposition size
lambda_ float
lambda regularization
max_iter int, default: 100
number of training iterations
double_precision bool, default: False
use double precision, not yet supported
thetaT {array-like} shape (n, f), default: None
initial theta matrix
XT {array-like} shape (m, f), default: None
initial XT matrix
random_state int, default: 1234
Attributes
----------
XT {array-like} shape (m, f)
XT matrix contains user's features
thetaT {array-like} shape (n, f)
transposed theta matrix, item's features
Warnings
--------
Matrices ``XT`` and ``thetaT`` may contain NaN elements. This is because some datasets
contain users or items with no ratings in the training set, so the corresponding
systems of linear equations have NaN solutions. Such elements can easily be removed with
NumPy functions like numpy.nan_to_num, but their presence may be useful for troubleshooting purposes.
'''
def __init__(self, f, lambda_, max_iter=100, double_precision=False, thetaT=None, XT=None, random_state=1234):
assert not double_precision, 'double precision is not yet supported'
assert f % 10 == 0, 'f has to be a multiple of 10'
self.f = f
self.lambda_ = lambda_
self.double_precision = double_precision
self.dtype = np.float64 if self.double_precision else np.float32
self.thetaT = thetaT
self.XT = XT
self.max_iter = max_iter
self.random_state = random_state
def _load_lib(self):
from ..libs.lib_utils import GPUlib
gpu_lib = GPUlib().get(1)
return gpu_lib
def fit(self, X, y=None, X_test=None, X_BATCHES=1, THETA_BATCHES=1, early_stopping_rounds=None, verbose=False, scores=None):
#pylint: disable=unused-argument
'''Learn model from rating matrix X.
Parameters
----------
X {array-like, sparse matrix}, shape (m, n)
Data matrix to be decomposed.
y None
Ignored
X_test {array-like, coo sparse matrix}, shape (m, n)
Data matrix for cross validation.
X_BATCHES int, default: 1
Batches to split XT, increase this parameter in case out of memory error.
THETA_BATCHES int, default: 1
Batches to split theta, increase this parameter in case out of memory error.
early_stopping_rounds int, default: None
Activates early stopping. Cross validation error needs to decrease
at least every <early_stopping_rounds> round(s) to continue training. Requires <X_test>.
Returns the model from the last iteration (not the best one). If early stopping occurs,
the model will have three additional fields: best_cv_score, best_train_score and best_iteration.
verbose bool, default: False
Prints training and validation score(if applicable) on each iteration.
scores {list}
List of tuples with train, cv score for every iteration.
Returns
-------
self : returns an instance of self.
'''
csc_X, csr_X, coo_X = _get_sparse_matrixes(X)
if early_stopping_rounds is not None:
assert X_test is not None, 'X_test is mandatory with early stopping'
coo_X_test = None
if X_test is not None:
assert scipy.sparse.isspmatrix_coo(
X_test), 'X_test must be a coo sparse scipy matrix'
assert X.shape == X_test.shape
assert X_test.dtype == self.dtype
assert X.dtype == self.dtype
coo_X_test = X_test
lib = self._load_lib()
if self.double_precision:
make_data = lib.make_factorization_data_double
run_step = lib.run_factorization_step_double
factorization_score = lib.factorization_score_double
copy_fecatorization_result = lib.copy_fecatorization_result_double
free_data = lib.free_data_double
else:
make_data = lib.make_factorization_data_float
run_step = lib.run_factorization_step_float
factorization_score = lib.factorization_score_float
copy_fecatorization_result = lib.copy_fecatorization_result_float
free_data = lib.free_data_float
m = coo_X.shape[0]
n = coo_X.shape[1]
nnz = csc_X.nnz
if coo_X_test is None:
nnz_test = 0
else:
nnz_test = coo_X_test.nnz
rs = np.random.RandomState(self.random_state)
if self.thetaT is None:
self.thetaT = rs.rand(n, self.f).astype(self.dtype)
else:
assert self.thetaT.dtype == self.dtype
if self.XT is None:
self.XT = rs.rand(m, self.f).astype(self.dtype)
else:
assert self.XT.dtype == self.dtype
csrRowIndexDevicePtr = None
csrColIndexDevicePtr = None
csrValDevicePtr = None
cscRowIndexDevicePtr = None
cscColIndexDevicePtr = None
cscValDevicePtr = None
cooRowIndexDevicePtr = None
cooColIndexDevicePtr = None
cooValDevicePtr = None
thetaTDevice = None
XTDevice = None
cooRowIndexTestDevicePtr = None
cooColIndexTestDevicePtr = None
cooValTestDevicePtr = None
status, csrRowIndexDevicePtr, csrColIndexDevicePtr, csrValDevicePtr, \
cscRowIndexDevicePtr, cscColIndexDevicePtr, cscValDevicePtr, \
cooRowIndexDevicePtr, cooColIndexDevicePtr, cooValDevicePtr, \
thetaTDevice, XTDevice, cooRowIndexTestDevicePtr, \
cooColIndexTestDevicePtr, cooValTestDevicePtr = make_data( # pylint: disable=W0212
m, n, self.f, nnz, nnz_test, csr_X.indptr, csr_X.indices, csr_X.data,
csc_X.indices, csc_X.indptr, csc_X.data,
coo_X.row, coo_X.col, coo_X.data,
self.thetaT, self.XT, coo_X_test.row if coo_X_test is not None else None,
coo_X_test.col if coo_X_test is not None else None, coo_X_test.data if coo_X_test is not None else None,
csrRowIndexDevicePtr, csrColIndexDevicePtr, csrValDevicePtr, cscRowIndexDevicePtr, cscColIndexDevicePtr, cscValDevicePtr,
cooRowIndexDevicePtr, cooColIndexDevicePtr, cooValDevicePtr,
thetaTDevice, XTDevice, cooRowIndexTestDevicePtr,
cooColIndexTestDevicePtr, cooValTestDevicePtr)
assert status == 0, 'Failure uploading the data'
self.best_train_score = np.inf
self.best_cv_score = np.inf
self.best_iteration = -1
cv_score = train_score = np.inf
for i in range(self.max_iter):
status = run_step(m,
n,
self.f,
nnz,
self.lambda_,
csrRowIndexDevicePtr,
csrColIndexDevicePtr,
csrValDevicePtr,
cscRowIndexDevicePtr,
cscColIndexDevicePtr,
cscValDevicePtr,
thetaTDevice,
XTDevice,
X_BATCHES,
THETA_BATCHES)
if verbose or scores is not None:
result = factorization_score(m,
n,
self.f,
nnz,
self.lambda_,
thetaTDevice,
XTDevice,
cooRowIndexDevicePtr,
cooColIndexDevicePtr,
cooValDevicePtr)
train_score = result[0]
if X_test is not None and (verbose or early_stopping_rounds is not None or scores is not None):
result = factorization_score(m,
n,
self.f,
nnz_test,
self.lambda_,
thetaTDevice,
XTDevice,
cooRowIndexTestDevicePtr,
cooColIndexTestDevicePtr,
cooValTestDevicePtr)
cv_score = result[0]
if verbose:
print("iteration {0} train: {1} cv: {2}".format(
i, train_score, cv_score))
if scores is not None:
scores.append((train_score, cv_score))
if early_stopping_rounds is not None:
if self.best_cv_score > cv_score:
self.best_cv_score = cv_score
self.best_train_score = train_score
self.best_iteration = i
if (i - self.best_iteration) > early_stopping_rounds:
if verbose:
print('best iteration:{0} train: {1} cv: {2}'.format(
self.best_iteration, self.best_train_score, self.best_cv_score))
break
lib.free_data_int(csrRowIndexDevicePtr)
lib.free_data_int(csrColIndexDevicePtr)
free_data(csrValDevicePtr)
lib.free_data_int(cscRowIndexDevicePtr)
lib.free_data_int(cscColIndexDevicePtr)
free_data(cscValDevicePtr)
lib.free_data_int(cooRowIndexDevicePtr)
lib.free_data_int(cooColIndexDevicePtr)
free_data(cooValDevicePtr)
lib.free_data_int(cooRowIndexTestDevicePtr)
lib.free_data_int(cooColIndexTestDevicePtr)
free_data(cooValTestDevicePtr)
copy_fecatorization_result(self.XT, XTDevice, m * self.f)
copy_fecatorization_result(self.thetaT, thetaTDevice, n * self.f)
free_data(thetaTDevice)
free_data(XTDevice)
return self
def predict(self, X):
        '''Predict the non-zero elements of coo sparse matrix X according to the fitted model.
Parameters
----------
X {array-like, sparse coo matrix} shape (m, n)
Data matrix in coo format. Values are ignored.
Returns
-------
{array-like, sparse coo matrix} shape (m, n)
Predicted values.
'''
        assert self.XT is not None and self.thetaT is not None, 'predict is invoked on an unfitted model'
assert scipy.sparse.isspmatrix_coo(
X), 'convert X to coo sparse matrix'
assert X.dtype == self.dtype
a = np.take(self.XT, X.row, axis=0)
b = np.take(self.thetaT, X.col, axis=0)
val = np.sum(a * b, axis=1)
return scipy.sparse.coo_matrix((val, (X.row, X.col)), shape=X.shape)
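# A minimal usage sketch (assumptions: scipy is installed, a CUDA-capable GPU with the
# h2o4gpu GPU library is available, and ratings are stored as float32 to match the
# default dtype). The data below is illustrative only.
if __name__ == "__main__":
    import numpy as np
    import scipy.sparse
    rows = np.array([0, 0, 1, 2])
    cols = np.array([0, 2, 1, 3])
    vals = np.array([5.0, 3.0, 4.0, 1.0], dtype=np.float32)
    ratings = scipy.sparse.coo_matrix((vals, (rows, cols)), shape=(3, 4))
    model = FactorizationH2O(f=10, lambda_=0.1, max_iter=20).fit(ratings)
    preds = model.predict(ratings)  # coo matrix with predictions at the same positions
    # With a held-out coo matrix of the same shape, early stopping can be enabled, e.g.
    # model.fit(ratings, X_test=ratings_holdout, early_stopping_rounds=5, verbose=True)
    # Rows of model.XT / model.thetaT for users or items without ratings may be nan;
    # they can be cleaned up with np.nan_to_num if needed.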
|
ascii_escaped
|
If val is pure ascii, returns it as a str(). Otherwise, escapes
bytes objects into a sequence of escaped bytes:
    b'Ã´ÅÖ' -> '\xc3\xb4\xc5\xd6'
and escapes unicode objects into a sequence of escaped unicode
ids, e.g.:
'4\nV\U00043efa\x0eMXWB\x1e\u3028\u15fd\xcd\U0007d944'
note:
the obvious "v.decode('unicode-escape')" will return
valid utf-8 unicode if it finds them in bytes, but we
want to return escaped bytes for any byte, even if they match
a utf-8 string.
|
"""
python version compatibility code
"""
import functools
import inspect
import io
import re
import sys
from contextlib import contextmanager
from inspect import Parameter
from inspect import signature
import attr
import py
import _pytest
from _pytest._io.saferepr import saferepr
from _pytest.outcomes import fail
from _pytest.outcomes import TEST_OUTCOME
NOTSET = object()
MODULE_NOT_FOUND_ERROR = (
"ModuleNotFoundError" if sys.version_info[:2] >= (3, 6) else "ImportError"
)
if sys.version_info >= (3, 8):
from importlib import metadata as importlib_metadata # noqa: F401
else:
import importlib_metadata # noqa: F401
def _format_args(func):
return str(signature(func))
# The type of re.compile objects is not exposed in Python.
REGEX_TYPE = type(re.compile(""))
def is_generator(func):
genfunc = inspect.isgeneratorfunction(func)
return genfunc and not iscoroutinefunction(func)
def iscoroutinefunction(func):
"""
Return True if func is a coroutine function (a function defined with async
def syntax, and doesn't contain yield), or a function decorated with
@asyncio.coroutine.
    Note: copied and modified from Python 3.5's builtin coroutines.py to avoid
    importing asyncio directly, which in turn also initializes the "logging"
module as a side-effect (see issue #8).
"""
return inspect.iscoroutinefunction(func) or getattr(func, "_is_coroutine", False)
def getlocation(function, curdir=None):
function = get_real_func(function)
fn = py.path.local(inspect.getfile(function))
lineno = function.__code__.co_firstlineno
if curdir is not None and fn.relto(curdir):
fn = fn.relto(curdir)
return "%s:%d" % (fn, lineno + 1)
def num_mock_patch_args(function):
""" return number of arguments used up by mock arguments (if any) """
patchings = getattr(function, "patchings", None)
if not patchings:
return 0
mock_sentinel = getattr(sys.modules.get("mock"), "DEFAULT", object())
ut_mock_sentinel = getattr(sys.modules.get("unittest.mock"), "DEFAULT", object())
return len(
[
p
for p in patchings
if not p.attribute_name
and (p.new is mock_sentinel or p.new is ut_mock_sentinel)
]
)
def getfuncargnames(function, *, name: str = "", is_method=False, cls=None):
"""Returns the names of a function's mandatory arguments.
This should return the names of all function arguments that:
* Aren't bound to an instance or type as in instance or class methods.
* Don't have default values.
* Aren't bound with functools.partial.
* Aren't replaced with mocks.
    The is_method and cls arguments indicate that the function should
    be treated as a bound method even though it's not one, except when,
    in the case of cls, the function is a static method.
The name parameter should be the original name in which the function was collected.
@RonnyPfannschmidt: This function should be refactored when we
revisit fixtures. The fixture mechanism should ask the node for
the fixture names, and not try to obtain directly from the
function object well after collection has occurred.
"""
# The parameters attribute of a Signature object contains an
# ordered mapping of parameter names to Parameter instances. This
# creates a tuple of the names of the parameters that don't have
# defaults.
try:
parameters = signature(function).parameters
except (ValueError, TypeError) as e:
fail(
"Could not determine arguments of {!r}: {}".format(function, e),
pytrace=False,
)
arg_names = tuple(
p.name
for p in parameters.values()
if (
p.kind is Parameter.POSITIONAL_OR_KEYWORD
or p.kind is Parameter.KEYWORD_ONLY
)
and p.default is Parameter.empty
)
if not name:
name = function.__name__
# If this function should be treated as a bound method even though
# it's passed as an unbound method or function, remove the first
# parameter name.
if is_method or (
cls and not isinstance(cls.__dict__.get(name, None), staticmethod)
):
arg_names = arg_names[1:]
# Remove any names that will be replaced with mocks.
if hasattr(function, "__wrapped__"):
arg_names = arg_names[num_mock_patch_args(function) :]
return arg_names
if sys.version_info < (3, 7):
@contextmanager
def nullcontext():
yield
else:
from contextlib import nullcontext # noqa
def get_default_arg_names(function):
# Note: this code intentionally mirrors the code at the beginning of getfuncargnames,
# to get the arguments which were excluded from its result because they had default values
return tuple(
p.name
for p in signature(function).parameters.values()
if p.kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY)
and p.default is not Parameter.empty
)
_non_printable_ascii_translate_table = {
i: "\\x{:02x}".format(i) for i in range(128) if i not in range(32, 127)
}
_non_printable_ascii_translate_table.update(
{ord("\t"): "\\t", ord("\r"): "\\r", ord("\n"): "\\n"}
)
def _translate_non_printable(s):
return s.translate(_non_printable_ascii_translate_table)
STRING_TYPES = bytes, str
def _bytes_to_ascii(val):
return val.decode("ascii", "backslashreplace")
# MASKED: ascii_escaped function (lines 187-209)
@attr.s
class _PytestWrapper:
"""Dummy wrapper around a function object for internal use only.
Used to correctly unwrap the underlying function object
when we are creating fixtures, because we wrap the function object ourselves with a decorator
to issue warnings when the fixture function is called directly.
"""
obj = attr.ib()
def get_real_func(obj):
""" gets the real function object of the (possibly) wrapped object by
functools.wraps or functools.partial.
"""
start_obj = obj
for i in range(100):
# __pytest_wrapped__ is set by @pytest.fixture when wrapping the fixture function
# to trigger a warning if it gets called directly instead of by pytest: we don't
# want to unwrap further than this otherwise we lose useful wrappings like @mock.patch (#3774)
new_obj = getattr(obj, "__pytest_wrapped__", None)
if isinstance(new_obj, _PytestWrapper):
obj = new_obj.obj
break
new_obj = getattr(obj, "__wrapped__", None)
if new_obj is None:
break
obj = new_obj
else:
raise ValueError(
("could not find real function of {start}\nstopped at {current}").format(
start=saferepr(start_obj), current=saferepr(obj)
)
)
if isinstance(obj, functools.partial):
obj = obj.func
return obj
def get_real_method(obj, holder):
"""
Attempts to obtain the real function object that might be wrapping ``obj``, while at the same time
returning a bound method to ``holder`` if the original object was a bound method.
"""
try:
is_method = hasattr(obj, "__func__")
obj = get_real_func(obj)
except Exception: # pragma: no cover
return obj
if is_method and hasattr(obj, "__get__") and callable(obj.__get__):
obj = obj.__get__(holder)
return obj
def getfslineno(obj):
# xxx let decorators etc specify a sane ordering
obj = get_real_func(obj)
if hasattr(obj, "place_as"):
obj = obj.place_as
fslineno = _pytest._code.getfslineno(obj)
assert isinstance(fslineno[1], int), obj
return fslineno
def getimfunc(func):
try:
return func.__func__
except AttributeError:
return func
def safe_getattr(object, name, default):
""" Like getattr but return default upon any Exception or any OutcomeException.
Attribute access can potentially fail for 'evil' Python objects.
See issue #214.
It catches OutcomeException because of #2490 (issue #580), new outcomes are derived from BaseException
instead of Exception (for more details check #2707)
"""
try:
return getattr(object, name, default)
except TEST_OUTCOME:
return default
def safe_isclass(obj):
"""Ignore any exception via isinstance on Python 3."""
try:
return inspect.isclass(obj)
except Exception:
return False
COLLECT_FAKEMODULE_ATTRIBUTES = (
"Collector",
"Module",
"Function",
"Instance",
"Session",
"Item",
"Class",
"File",
"_fillfuncargs",
)
def _setup_collect_fakemodule():
from types import ModuleType
import pytest
pytest.collect = ModuleType("pytest.collect")
pytest.collect.__all__ = [] # used for setns
for attr_name in COLLECT_FAKEMODULE_ATTRIBUTES:
setattr(pytest.collect, attr_name, getattr(pytest, attr_name))
class CaptureIO(io.TextIOWrapper):
def __init__(self):
super().__init__(io.BytesIO(), encoding="UTF-8", newline="", write_through=True)
def getvalue(self):
return self.buffer.getvalue().decode("UTF-8")
class FuncargnamesCompatAttr:
""" helper class so that Metafunc, Function and FixtureRequest
don't need to each define the "funcargnames" compatibility attribute.
"""
@property
def funcargnames(self):
""" alias attribute for ``fixturenames`` for pre-2.3 compatibility"""
import warnings
from _pytest.deprecated import FUNCARGNAMES
warnings.warn(FUNCARGNAMES, stacklevel=2)
return self.fixturenames
|
def ascii_escaped(val):
"""If val is pure ascii, returns it as a str(). Otherwise, escapes
bytes objects into a sequence of escaped bytes:
b'\xc3\xb4\xc5\xd6' -> '\\xc3\\xb4\\xc5\\xd6'
and escapes unicode objects into a sequence of escaped unicode
ids, e.g.:
'4\\nV\\U00043efa\\x0eMXWB\\x1e\\u3028\\u15fd\\xcd\\U0007d944'
note:
the obvious "v.decode('unicode-escape')" will return
valid utf-8 unicode if it finds them in bytes, but we
want to return escaped bytes for any byte, even if they match
a utf-8 string.
"""
if isinstance(val, bytes):
ret = _bytes_to_ascii(val)
else:
ret = val.encode("unicode_escape").decode("ascii")
return _translate_non_printable(ret)
| 187 | 209 |
"""
python version compatibility code
"""
import functools
import inspect
import io
import re
import sys
from contextlib import contextmanager
from inspect import Parameter
from inspect import signature
import attr
import py
import _pytest
from _pytest._io.saferepr import saferepr
from _pytest.outcomes import fail
from _pytest.outcomes import TEST_OUTCOME
NOTSET = object()
MODULE_NOT_FOUND_ERROR = (
"ModuleNotFoundError" if sys.version_info[:2] >= (3, 6) else "ImportError"
)
if sys.version_info >= (3, 8):
from importlib import metadata as importlib_metadata # noqa: F401
else:
import importlib_metadata # noqa: F401
def _format_args(func):
return str(signature(func))
# The type of re.compile objects is not exposed in Python.
REGEX_TYPE = type(re.compile(""))
def is_generator(func):
genfunc = inspect.isgeneratorfunction(func)
return genfunc and not iscoroutinefunction(func)
def iscoroutinefunction(func):
"""
Return True if func is a coroutine function (a function defined with async
def syntax, and doesn't contain yield), or a function decorated with
@asyncio.coroutine.
    Note: copied and modified from Python 3.5's builtin coroutines.py to avoid
    importing asyncio directly, which in turn also initializes the "logging"
module as a side-effect (see issue #8).
"""
return inspect.iscoroutinefunction(func) or getattr(func, "_is_coroutine", False)
def getlocation(function, curdir=None):
function = get_real_func(function)
fn = py.path.local(inspect.getfile(function))
lineno = function.__code__.co_firstlineno
if curdir is not None and fn.relto(curdir):
fn = fn.relto(curdir)
return "%s:%d" % (fn, lineno + 1)
def num_mock_patch_args(function):
""" return number of arguments used up by mock arguments (if any) """
patchings = getattr(function, "patchings", None)
if not patchings:
return 0
mock_sentinel = getattr(sys.modules.get("mock"), "DEFAULT", object())
ut_mock_sentinel = getattr(sys.modules.get("unittest.mock"), "DEFAULT", object())
return len(
[
p
for p in patchings
if not p.attribute_name
and (p.new is mock_sentinel or p.new is ut_mock_sentinel)
]
)
def getfuncargnames(function, *, name: str = "", is_method=False, cls=None):
"""Returns the names of a function's mandatory arguments.
This should return the names of all function arguments that:
* Aren't bound to an instance or type as in instance or class methods.
* Don't have default values.
* Aren't bound with functools.partial.
* Aren't replaced with mocks.
    The is_method and cls arguments indicate that the function should
    be treated as a bound method even though it's not one, except when,
    in the case of cls, the function is a static method.
The name parameter should be the original name in which the function was collected.
@RonnyPfannschmidt: This function should be refactored when we
revisit fixtures. The fixture mechanism should ask the node for
the fixture names, and not try to obtain directly from the
function object well after collection has occurred.
"""
# The parameters attribute of a Signature object contains an
# ordered mapping of parameter names to Parameter instances. This
# creates a tuple of the names of the parameters that don't have
# defaults.
try:
parameters = signature(function).parameters
except (ValueError, TypeError) as e:
fail(
"Could not determine arguments of {!r}: {}".format(function, e),
pytrace=False,
)
arg_names = tuple(
p.name
for p in parameters.values()
if (
p.kind is Parameter.POSITIONAL_OR_KEYWORD
or p.kind is Parameter.KEYWORD_ONLY
)
and p.default is Parameter.empty
)
if not name:
name = function.__name__
# If this function should be treated as a bound method even though
# it's passed as an unbound method or function, remove the first
# parameter name.
if is_method or (
cls and not isinstance(cls.__dict__.get(name, None), staticmethod)
):
arg_names = arg_names[1:]
# Remove any names that will be replaced with mocks.
if hasattr(function, "__wrapped__"):
arg_names = arg_names[num_mock_patch_args(function) :]
return arg_names
if sys.version_info < (3, 7):
@contextmanager
def nullcontext():
yield
else:
from contextlib import nullcontext # noqa
def get_default_arg_names(function):
# Note: this code intentionally mirrors the code at the beginning of getfuncargnames,
# to get the arguments which were excluded from its result because they had default values
return tuple(
p.name
for p in signature(function).parameters.values()
if p.kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY)
and p.default is not Parameter.empty
)
_non_printable_ascii_translate_table = {
i: "\\x{:02x}".format(i) for i in range(128) if i not in range(32, 127)
}
_non_printable_ascii_translate_table.update(
{ord("\t"): "\\t", ord("\r"): "\\r", ord("\n"): "\\n"}
)
def _translate_non_printable(s):
return s.translate(_non_printable_ascii_translate_table)
STRING_TYPES = bytes, str
def _bytes_to_ascii(val):
return val.decode("ascii", "backslashreplace")
def ascii_escaped(val):
"""If val is pure ascii, returns it as a str(). Otherwise, escapes
bytes objects into a sequence of escaped bytes:
b'\xc3\xb4\xc5\xd6' -> '\\xc3\\xb4\\xc5\\xd6'
and escapes unicode objects into a sequence of escaped unicode
ids, e.g.:
'4\\nV\\U00043efa\\x0eMXWB\\x1e\\u3028\\u15fd\\xcd\\U0007d944'
note:
the obvious "v.decode('unicode-escape')" will return
valid utf-8 unicode if it finds them in bytes, but we
want to return escaped bytes for any byte, even if they match
a utf-8 string.
"""
if isinstance(val, bytes):
ret = _bytes_to_ascii(val)
else:
ret = val.encode("unicode_escape").decode("ascii")
return _translate_non_printable(ret)
@attr.s
class _PytestWrapper:
"""Dummy wrapper around a function object for internal use only.
Used to correctly unwrap the underlying function object
when we are creating fixtures, because we wrap the function object ourselves with a decorator
to issue warnings when the fixture function is called directly.
"""
obj = attr.ib()
def get_real_func(obj):
""" gets the real function object of the (possibly) wrapped object by
functools.wraps or functools.partial.
"""
start_obj = obj
for i in range(100):
# __pytest_wrapped__ is set by @pytest.fixture when wrapping the fixture function
# to trigger a warning if it gets called directly instead of by pytest: we don't
# want to unwrap further than this otherwise we lose useful wrappings like @mock.patch (#3774)
new_obj = getattr(obj, "__pytest_wrapped__", None)
if isinstance(new_obj, _PytestWrapper):
obj = new_obj.obj
break
new_obj = getattr(obj, "__wrapped__", None)
if new_obj is None:
break
obj = new_obj
else:
raise ValueError(
("could not find real function of {start}\nstopped at {current}").format(
start=saferepr(start_obj), current=saferepr(obj)
)
)
if isinstance(obj, functools.partial):
obj = obj.func
return obj
def get_real_method(obj, holder):
"""
Attempts to obtain the real function object that might be wrapping ``obj``, while at the same time
returning a bound method to ``holder`` if the original object was a bound method.
"""
try:
is_method = hasattr(obj, "__func__")
obj = get_real_func(obj)
except Exception: # pragma: no cover
return obj
if is_method and hasattr(obj, "__get__") and callable(obj.__get__):
obj = obj.__get__(holder)
return obj
def getfslineno(obj):
# xxx let decorators etc specify a sane ordering
obj = get_real_func(obj)
if hasattr(obj, "place_as"):
obj = obj.place_as
fslineno = _pytest._code.getfslineno(obj)
assert isinstance(fslineno[1], int), obj
return fslineno
def getimfunc(func):
try:
return func.__func__
except AttributeError:
return func
def safe_getattr(object, name, default):
""" Like getattr but return default upon any Exception or any OutcomeException.
Attribute access can potentially fail for 'evil' Python objects.
See issue #214.
It catches OutcomeException because of #2490 (issue #580), new outcomes are derived from BaseException
instead of Exception (for more details check #2707)
"""
try:
return getattr(object, name, default)
except TEST_OUTCOME:
return default
def safe_isclass(obj):
"""Ignore any exception via isinstance on Python 3."""
try:
return inspect.isclass(obj)
except Exception:
return False
COLLECT_FAKEMODULE_ATTRIBUTES = (
"Collector",
"Module",
"Function",
"Instance",
"Session",
"Item",
"Class",
"File",
"_fillfuncargs",
)
def _setup_collect_fakemodule():
from types import ModuleType
import pytest
pytest.collect = ModuleType("pytest.collect")
pytest.collect.__all__ = [] # used for setns
for attr_name in COLLECT_FAKEMODULE_ATTRIBUTES:
setattr(pytest.collect, attr_name, getattr(pytest, attr_name))
class CaptureIO(io.TextIOWrapper):
def __init__(self):
super().__init__(io.BytesIO(), encoding="UTF-8", newline="", write_through=True)
def getvalue(self):
return self.buffer.getvalue().decode("UTF-8")
class FuncargnamesCompatAttr:
""" helper class so that Metafunc, Function and FixtureRequest
don't need to each define the "funcargnames" compatibility attribute.
"""
@property
def funcargnames(self):
""" alias attribute for ``fixturenames`` for pre-2.3 compatibility"""
import warnings
from _pytest.deprecated import FUNCARGNAMES
warnings.warn(FUNCARGNAMES, stacklevel=2)
return self.fixturenames
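# A minimal usage sketch (assumption: executed as a script on Python 3); the demo function
# below is hypothetical and only illustrates the return values of a few helpers.
if __name__ == "__main__":
    assert ascii_escaped("pure ascii") == "pure ascii"      # ascii text passes through
    assert ascii_escaped(b"\xc3\xb4") == "\\xc3\\xb4"        # bytes become escaped byte text
    assert ascii_escaped("h\xe9llo\n") == "h\\xe9llo\\n"     # non-ascii and control chars escaped

    def demo(a, b=1, *, c, d=2):
        pass

    assert getfuncargnames(demo) == ("a", "c")               # mandatory arguments only
    assert get_default_arg_names(demo) == ("b", "d")         # arguments with defaults
    assert get_real_func(functools.partial(demo, 1)) is demo  # partials are unwrapped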
|
resnet18
|
Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
|
#!/usr/bin/env python
# coding: utf-8
#
# This code is based on torchvision resnet
# URL: https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1, padding=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=padding, dilation=dilation, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride, dilation, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes, 1, dilation, dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride, dilation, downsample=None, expansion=4):
super(Bottleneck, self).__init__()
self.expansion = expansion
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes, stride, dilation, dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = conv1x1(planes, planes * self.expansion)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, output_stride, num_classes=1000, input_channels=3):
super(ResNet, self).__init__()
if output_stride == 8:
stride = [1, 2, 1, 1]
dilation = [1, 1, 2, 2]
elif output_stride == 16:
stride = [1, 2, 2, 1]
dilation = [1, 1, 1, 2]
self.inplanes = 64
self.conv1 = nn.Conv2d(input_channels, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], stride=stride[0], dilation=dilation[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=stride[1], dilation=dilation[1])
self.layer3 = self._make_layer(block, 256, layers[2], stride=stride[2], dilation=dilation[2])
self.layer4 = self._make_layer(block, 512, layers[3], stride=stride[3], dilation=dilation[3])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride, dilation):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, dilation, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, 1, dilation))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
# MASKED: resnet18 function (lines 171-179)
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
|
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
| 171 | 179 |
#!/usr/bin/env python
# coding: utf-8
#
# This code is based on torchvision resnet
# URL: https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1, padding=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=padding, dilation=dilation, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride, dilation, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes, 1, dilation, dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride, dilation, downsample=None, expansion=4):
super(Bottleneck, self).__init__()
self.expansion = expansion
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes, stride, dilation, dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = conv1x1(planes, planes * self.expansion)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, output_stride, num_classes=1000, input_channels=3):
super(ResNet, self).__init__()
if output_stride == 8:
stride = [1, 2, 1, 1]
dilation = [1, 1, 2, 2]
elif output_stride == 16:
stride = [1, 2, 2, 1]
dilation = [1, 1, 1, 2]
self.inplanes = 64
self.conv1 = nn.Conv2d(input_channels, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], stride=stride[0], dilation=dilation[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=stride[1], dilation=dilation[1])
self.layer3 = self._make_layer(block, 256, layers[2], stride=stride[2], dilation=dilation[2])
self.layer4 = self._make_layer(block, 512, layers[3], stride=stride[3], dilation=dilation[3])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride, dilation):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, dilation, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, 1, dilation))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
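# A minimal usage sketch (assumptions: torch is installed and output_stride, which this
# dilated variant requires, is set to 16; pretrained=False keeps the example offline).
if __name__ == "__main__":
    import torch
    model = resnet18(pretrained=False, output_stride=16)
    model.eval()
    with torch.no_grad():
        logits = model(torch.randn(1, 3, 224, 224))
    print(logits.shape)  # torch.Size([1, 1000])
    # output_stride (8 or 16) only changes the stride/dilation pattern of the residual
    # stages, not the classifier head, so the output shape matches stock ResNet.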
|
resnet34
|
Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
|
#!/usr/bin/env python
# coding: utf-8
#
# This code is based on torchvision resnet
# URL: https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1, padding=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=padding, dilation=dilation, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride, dilation, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes, 1, dilation, dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride, dilation, downsample=None, expansion=4):
super(Bottleneck, self).__init__()
self.expansion = expansion
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes, stride, dilation, dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = conv1x1(planes, planes * self.expansion)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, output_stride, num_classes=1000, input_channels=3):
super(ResNet, self).__init__()
if output_stride == 8:
stride = [1, 2, 1, 1]
dilation = [1, 1, 2, 2]
elif output_stride == 16:
stride = [1, 2, 2, 1]
dilation = [1, 1, 1, 2]
self.inplanes = 64
self.conv1 = nn.Conv2d(input_channels, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], stride=stride[0], dilation=dilation[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=stride[1], dilation=dilation[1])
self.layer3 = self._make_layer(block, 256, layers[2], stride=stride[2], dilation=dilation[2])
self.layer4 = self._make_layer(block, 512, layers[3], stride=stride[3], dilation=dilation[3])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride, dilation):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, dilation, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, 1, dilation))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
# MASKED: resnet34 function (lines 182-190)
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
|
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
| 182 | 190 |
#!/usr/bin/env python
# coding: utf-8
#
# This code is based on torchvision resnet
# URL: https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1, padding=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=padding, dilation=dilation, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride, dilation, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes, 1, dilation, dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride, dilation, downsample=None, expansion=4):
super(Bottleneck, self).__init__()
self.expansion = expansion
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes, stride, dilation, dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = conv1x1(planes, planes * self.expansion)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, output_stride, num_classes=1000, input_channels=3):
super(ResNet, self).__init__()
if output_stride == 8:
stride = [1, 2, 1, 1]
dilation = [1, 1, 2, 2]
elif output_stride == 16:
stride = [1, 2, 2, 1]
dilation = [1, 1, 1, 2]
self.inplanes = 64
self.conv1 = nn.Conv2d(input_channels, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], stride=stride[0], dilation=dilation[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=stride[1], dilation=dilation[1])
self.layer3 = self._make_layer(block, 256, layers[2], stride=stride[2], dilation=dilation[2])
self.layer4 = self._make_layer(block, 512, layers[3], stride=stride[3], dilation=dilation[3])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride, dilation):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, dilation, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, 1, dilation))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
|
resnet50
|
Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
|
#!/usr/bin/env python
# coding: utf-8
#
# This code is based on torchvision resnet
# URL: https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1, padding=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=padding, dilation=dilation, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride, dilation, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes, 1, dilation, dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride, dilation, downsample=None, expansion=4):
super(Bottleneck, self).__init__()
self.expansion = expansion
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes, stride, dilation, dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = conv1x1(planes, planes * self.expansion)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, output_stride, num_classes=1000, input_channels=3):
super(ResNet, self).__init__()
if output_stride == 8:
stride = [1, 2, 1, 1]
dilation = [1, 1, 2, 2]
elif output_stride == 16:
stride = [1, 2, 2, 1]
dilation = [1, 1, 1, 2]
self.inplanes = 64
self.conv1 = nn.Conv2d(input_channels, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], stride=stride[0], dilation=dilation[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=stride[1], dilation=dilation[1])
self.layer3 = self._make_layer(block, 256, layers[2], stride=stride[2], dilation=dilation[2])
self.layer4 = self._make_layer(block, 512, layers[3], stride=stride[3], dilation=dilation[3])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride, dilation):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, dilation, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, 1, dilation))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
# MASKED: resnet50 function (lines 193-201)
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
|
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
| 193 | 201 |
#!/usr/bin/env python
# coding: utf-8
#
# This code is based on torchvision resnet
# URL: https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1, padding=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=padding, dilation=dilation, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride, dilation, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes, 1, dilation, dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride, dilation, downsample=None, expansion=4):
super(Bottleneck, self).__init__()
self.expansion = expansion
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes, stride, dilation, dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = conv1x1(planes, planes * self.expansion)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, output_stride, num_classes=1000, input_channels=3):
super(ResNet, self).__init__()
if output_stride == 8:
stride = [1, 2, 1, 1]
dilation = [1, 1, 2, 2]
        elif output_stride == 16:
            stride = [1, 2, 2, 1]
            dilation = [1, 1, 1, 2]
        else:
            raise ValueError('output_stride must be 8 or 16, got {}'.format(output_stride))
self.inplanes = 64
self.conv1 = nn.Conv2d(input_channels, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], stride=stride[0], dilation=dilation[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=stride[1], dilation=dilation[1])
self.layer3 = self._make_layer(block, 256, layers[2], stride=stride[2], dilation=dilation[2])
self.layer4 = self._make_layer(block, 512, layers[3], stride=stride[3], dilation=dilation[3])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride, dilation):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, dilation, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, 1, dilation))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
|
resnet101
|
Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
|
#!/usr/bin/env python
# coding: utf-8
#
# This code is based on torchvision resnet
# URL: https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1, padding=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=padding, dilation=dilation, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride, dilation, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes, 1, dilation, dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride, dilation, downsample=None, expansion=4):
super(Bottleneck, self).__init__()
self.expansion = expansion
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes, stride, dilation, dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = conv1x1(planes, planes * self.expansion)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, output_stride, num_classes=1000, input_channels=3):
super(ResNet, self).__init__()
if output_stride == 8:
stride = [1, 2, 1, 1]
dilation = [1, 1, 2, 2]
        elif output_stride == 16:
            stride = [1, 2, 2, 1]
            dilation = [1, 1, 1, 2]
        else:
            raise ValueError('output_stride must be 8 or 16, got {}'.format(output_stride))
self.inplanes = 64
self.conv1 = nn.Conv2d(input_channels, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], stride=stride[0], dilation=dilation[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=stride[1], dilation=dilation[1])
self.layer3 = self._make_layer(block, 256, layers[2], stride=stride[2], dilation=dilation[2])
self.layer4 = self._make_layer(block, 512, layers[3], stride=stride[3], dilation=dilation[3])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride, dilation):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, dilation, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, 1, dilation))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
# MASKED: resnet101 function (lines 204-212)
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
|
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
| 204 | 212 |
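As an illustrative aside (my own sketch, not from the record above), the three Bottleneck-based factories differ only in their per-stage block counts ([3, 4, 6, 3], [3, 4, 23, 3], [3, 8, 36, 3]), which is easy to confirm by comparing parameter totals.
# The factories below are the ones defined in the file above.
for name, factory in [('resnet50', resnet50), ('resnet101', resnet101), ('resnet152', resnet152)]:
    m = factory(output_stride=16)  # output_stride is required by this variant
    n_params = sum(p.numel() for p in m.parameters())
    print(name, n_params)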
#!/usr/bin/env python
# coding: utf-8
#
# This code is based on torchvision resnet
# URL: https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1, padding=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=padding, dilation=dilation, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride, dilation, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes, 1, dilation, dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride, dilation, downsample=None, expansion=4):
super(Bottleneck, self).__init__()
self.expansion = expansion
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes, stride, dilation, dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = conv1x1(planes, planes * self.expansion)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, output_stride, num_classes=1000, input_channels=3):
super(ResNet, self).__init__()
if output_stride == 8:
stride = [1, 2, 1, 1]
dilation = [1, 1, 2, 2]
        elif output_stride == 16:
            stride = [1, 2, 2, 1]
            dilation = [1, 1, 1, 2]
        else:
            raise ValueError('output_stride must be 8 or 16, got {}'.format(output_stride))
self.inplanes = 64
self.conv1 = nn.Conv2d(input_channels, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], stride=stride[0], dilation=dilation[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=stride[1], dilation=dilation[1])
self.layer3 = self._make_layer(block, 256, layers[2], stride=stride[2], dilation=dilation[2])
self.layer4 = self._make_layer(block, 512, layers[3], stride=stride[3], dilation=dilation[3])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride, dilation):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, dilation, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, 1, dilation))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
|
resnet152
|
Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
|
#!/usr/bin/env python
# coding: utf-8
#
# This code is based on torchvision resnet
# URL: https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1, padding=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=padding, dilation=dilation, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride, dilation, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes, 1, dilation, dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride, dilation, downsample=None, expansion=4):
super(Bottleneck, self).__init__()
self.expansion = expansion
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes, stride, dilation, dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = conv1x1(planes, planes * self.expansion)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, output_stride, num_classes=1000, input_channels=3):
super(ResNet, self).__init__()
if output_stride == 8:
stride = [1, 2, 1, 1]
dilation = [1, 1, 2, 2]
        elif output_stride == 16:
            stride = [1, 2, 2, 1]
            dilation = [1, 1, 1, 2]
        else:
            raise ValueError('output_stride must be 8 or 16, got {}'.format(output_stride))
self.inplanes = 64
self.conv1 = nn.Conv2d(input_channels, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], stride=stride[0], dilation=dilation[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=stride[1], dilation=dilation[1])
self.layer3 = self._make_layer(block, 256, layers[2], stride=stride[2], dilation=dilation[2])
self.layer4 = self._make_layer(block, 512, layers[3], stride=stride[3], dilation=dilation[3])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride, dilation):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, dilation, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, 1, dilation))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
# MASKED: resnet152 function (lines 215-223)
|
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
| 215 | 223 |
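A quick check of what output_stride actually controls (illustrative sketch, my addition): with a 224x224 input, conv1 and maxpool each halve the resolution, and the remaining downsampling comes only from the stages that keep stride 2, so layer4 features end up at 224/8 = 28x28 for output_stride=8 and 224/16 = 14x14 for output_stride=16; the dilated 3x3 convolutions preserve resolution in the stages whose stride is set to 1.
import torch
def layer4_spatial_size(output_stride):
    # resnet50 is the factory defined in the file above.
    m = resnet50(output_stride=output_stride)
    m.eval()
    x = torch.randn(1, 3, 224, 224)
    with torch.no_grad():
        x = m.maxpool(m.relu(m.bn1(m.conv1(x))))
        x = m.layer4(m.layer3(m.layer2(m.layer1(x))))
    return tuple(x.shape[-2:])
print(layer4_spatial_size(8))   # expected (28, 28)
print(layer4_spatial_size(16))  # expected (14, 14)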
#!/usr/bin/env python
# coding: utf-8
#
# This code is based on torchvision resnet
# URL: https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1, padding=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=padding, dilation=dilation, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride, dilation, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes, 1, dilation, dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride, dilation, downsample=None, expansion=4):
super(Bottleneck, self).__init__()
self.expansion = expansion
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes, stride, dilation, dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = conv1x1(planes, planes * self.expansion)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, output_stride, num_classes=1000, input_channels=3):
super(ResNet, self).__init__()
if output_stride == 8:
stride = [1, 2, 1, 1]
dilation = [1, 1, 2, 2]
        elif output_stride == 16:
            stride = [1, 2, 2, 1]
            dilation = [1, 1, 1, 2]
        else:
            raise ValueError('output_stride must be 8 or 16, got {}'.format(output_stride))
self.inplanes = 64
self.conv1 = nn.Conv2d(input_channels, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], stride=stride[0], dilation=dilation[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=stride[1], dilation=dilation[1])
self.layer3 = self._make_layer(block, 256, layers[2], stride=stride[2], dilation=dilation[2])
self.layer4 = self._make_layer(block, 512, layers[3], stride=stride[3], dilation=dilation[3])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride, dilation):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, dilation, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, 1, dilation))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
|
set_random_seed
|
function: Set random seed.
Args:
seed (int): Seed to be used.
deterministic (bool): Whether to set the deterministic option for
CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
to True and `torch.backends.cudnn.benchmark` to False.
Default: False.
|
import sys
import time
import torch
import random
import argparse
import numpy as np
import torch.nn as nn
import torchvision.transforms as transforms
from torchvision import datasets
from torch.utils.data import DataLoader
# new #
import torch.cuda.amp as amp
def printParaNum(model):
'''
function: print the number of total parameters and trainable parameters
'''
total_params = sum(p.numel() for p in model.parameters())
total_trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('Total parameters: %d' % total_params)
print('Trainable parameters: %d' % total_trainable_params)
# MASKED: set_random_seed function (lines 28-46)
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.model = nn.Sequential(
nn.ReflectionPad2d(1), nn.Conv2d(1, 3, 3, 2), nn.BatchNorm2d(3), nn.LeakyReLU(0.2, inplace=True),
nn.ReflectionPad2d(1), nn.Conv2d(3, 3, 3, 1), nn.BatchNorm2d(3), nn.LeakyReLU(0.2, inplace=True),
nn.ReflectionPad2d(1), nn.Conv2d(3, 8, 3, 2), nn.BatchNorm2d(8), nn.LeakyReLU(0.2, inplace=True),
nn.ReflectionPad2d(1), nn.Conv2d(8, 8, 3, 1), nn.BatchNorm2d(8), nn.LeakyReLU(0.2, inplace=True),
nn.ReflectionPad2d(1), nn.Conv2d(8, 16, 3, 2), nn.BatchNorm2d(16), nn.LeakyReLU(0.2, inplace=True),
nn.ReflectionPad2d(1), nn.Conv2d(16, 16, 3, 1), nn.BatchNorm2d(16), nn.LeakyReLU(0.2, inplace=True),
nn.ReflectionPad2d(1), nn.Conv2d(16, 32, 3, 2), nn.BatchNorm2d(32), nn.LeakyReLU(0.2, inplace=True),
nn.ReflectionPad2d(1), nn.Conv2d(32, 32, 3, 1), nn.BatchNorm2d(32), nn.LeakyReLU(0.2, inplace=True),
nn.Flatten(), nn.Linear(128, 10)
)
self.initialize_weights()
def forward(self, img):
out = self.model(img)
return out
def initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.xavier_normal_(m.weight.data)
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight.data, 0, 0.01)
m.bias.data.zero_()
time_begin = time.time()
print('---------------------------------------- step 1/5 : parameters preparing... ----------------------------------------')
parser = argparse.ArgumentParser()
parser.add_argument("--epochs", type=int, default=5, help="number of epochs of training")
parser.add_argument("--lr", type=float, default=0.0002, help="learning rate")
parser.add_argument("--batch_size", type=int, default=2048, help="size of the batches")
parser.add_argument("--workers", type=int, default=4, help="number of cpu threads to use during batch generation")
parser.add_argument("--dataset", type=str, default='../dataset/mnist', help="dataset root")
parser.add_argument("--result_dir", type=str, default='../result', help="dir for saving the results")
opt = parser.parse_args()
print(opt)
set_random_seed(1234, deterministic=True)
time_1 = time.time()
print('---------------------------------------- step 2/5 : data loading... ------------------------------------------------')
dataset = datasets.MNIST(opt.dataset, train=True, download=True,
transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.5], [0.5])]))
dataloader = DataLoader(dataset=dataset, batch_size=opt.batch_size, shuffle=True, num_workers=opt.workers)
time_2 = time.time()
print('---------------------------------------- step 3/5 : model defining... ----------------------------------------------')
model = Model().cuda()
printParaNum(model)
time_3 = time.time()
print('---------------------------------------- step 4/5 : requisites defining... -----------------------------------------')
# Loss function
loss_func = nn.CrossEntropyLoss()
# Optimizers
optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr, betas=(0.5, 0.999))
# NEW #
scaler = amp.GradScaler()
time_4 = time.time()
print('---------------------------------------- step 5/5 : training... ----------------------------------------------------')
f = open(opt.result_dir + '/log_' + sys.argv[0][0:-3] + '.txt', 'w')
f.write('Type: single machine, single card, mixed precision' + '\n')
f.write('Parallel manner: none' + '\n')
f.write('Mixed-precision manner: amp' + '\n')
f.write('Setting: epochs: {}, lr: {}, batch_size: {}, workers: {}'.format(opt.epochs, opt.lr, opt.batch_size, opt.workers) + '\n')
f.write('----------------------------' + '\n')
f.write('Training: ' + '\n')
f.write('----------------------------' + '\n')
time_4_dataloading = 0
time_4_computing = 0
for epoch in range(opt.epochs):
time_4_begin = time.time()
for i, (imgs, labels) in enumerate(dataloader):
imgs = imgs.cuda()
labels = labels.cuda()
time_temp = time.time()
time_4_dataloading += time_temp - time_4_begin
optimizer.zero_grad()
# new #
with amp.autocast():
pred = model(imgs)
loss = loss_func(pred, labels)
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
_, pred = torch.max(pred, 1)
acc = (pred == labels).sum().item() / len(labels)
print('Training: Epoch[{:0>3}/{:0>3}] Iteration[{:0>4}/{:0>4}] Loss: {:.4f} Acc: {:.4f}'.format(
epoch + 1, opt.epochs, i + 1, len(dataloader), loss, acc))
f.write('Training: Epoch[{:0>3}/{:0>3}] Iteration[{:0>4}/{:0>4}] Loss: {:.4f} Acc: {:.4f}'.format(
epoch + 1, opt.epochs, i + 1, len(dataloader), loss, acc) + '\n')
time_4_computing += time.time() - time_temp
time_4_begin = time.time()
time_5 = time.time()
f.write('\n')
f.write('TIME COST' + '\n')
f.write('Parameters preparing: {:.6f}(s)'.format(time_1 - time_begin) + '\n')
f.write('Data loading: {:.6f}(s)'.format(time_2 - time_1) + '\n')
f.write('Model defining: {:.6f}(s)'.format(time_3 - time_2) + '\n')
f.write('Requisites defining: {:.6f}(s)'.format(time_4 - time_3) + '\n')
f.write('Training: {:.6f}(s)'.format(time_5 - time_4) + '\n')
f.write(' Training (dataloading): {:.6f}(s)'.format(time_4_dataloading) + '\n')
f.write(' Training (computing): {:.6f}(s)'.format(time_4_computing) + '\n')
f.close()
torch.save(model.state_dict(), opt.result_dir + '/model_' + sys.argv[0][0:-3] + '.pkl')
|
def set_random_seed(seed, deterministic=False):
'''
function: Set random seed.
Args:
seed (int): Seed to be used.
deterministic (bool): Whether to set the deterministic option for
CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
to True and `torch.backends.cudnn.benchmark` to False.
Default: False.
'''
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
if deterministic:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
| 28 | 46 |
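A small reproducibility check (illustrative, not part of the original script): re-seeding with the same value should make the RNG streams repeat exactly, which is the point of calling set_random_seed before data loading and model creation.
import torch
# set_random_seed is the helper defined above.
set_random_seed(1234, deterministic=True)
a = torch.rand(3)
set_random_seed(1234, deterministic=True)
b = torch.rand(3)
assert torch.equal(a, b)  # identical draws after identical seeding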
import sys
import time
import torch
import random
import argparse
import numpy as np
import torch.nn as nn
import torchvision.transforms as transforms
from torchvision import datasets
from torch.utils.data import DataLoader
# new #
import torch.cuda.amp as amp
def printParaNum(model):
'''
function: print the number of total parameters and trainable parameters
'''
total_params = sum(p.numel() for p in model.parameters())
total_trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('Total parameters: %d' % total_params)
print('Trainable parameters: %d' % total_trainable_params)
def set_random_seed(seed, deterministic=False):
'''
function: Set random seed.
Args:
seed (int): Seed to be used.
deterministic (bool): Whether to set the deterministic option for
CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
to True and `torch.backends.cudnn.benchmark` to False.
Default: False.
'''
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
if deterministic:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.model = nn.Sequential(
nn.ReflectionPad2d(1), nn.Conv2d(1, 3, 3, 2), nn.BatchNorm2d(3), nn.LeakyReLU(0.2, inplace=True),
nn.ReflectionPad2d(1), nn.Conv2d(3, 3, 3, 1), nn.BatchNorm2d(3), nn.LeakyReLU(0.2, inplace=True),
nn.ReflectionPad2d(1), nn.Conv2d(3, 8, 3, 2), nn.BatchNorm2d(8), nn.LeakyReLU(0.2, inplace=True),
nn.ReflectionPad2d(1), nn.Conv2d(8, 8, 3, 1), nn.BatchNorm2d(8), nn.LeakyReLU(0.2, inplace=True),
nn.ReflectionPad2d(1), nn.Conv2d(8, 16, 3, 2), nn.BatchNorm2d(16), nn.LeakyReLU(0.2, inplace=True),
nn.ReflectionPad2d(1), nn.Conv2d(16, 16, 3, 1), nn.BatchNorm2d(16), nn.LeakyReLU(0.2, inplace=True),
nn.ReflectionPad2d(1), nn.Conv2d(16, 32, 3, 2), nn.BatchNorm2d(32), nn.LeakyReLU(0.2, inplace=True),
nn.ReflectionPad2d(1), nn.Conv2d(32, 32, 3, 1), nn.BatchNorm2d(32), nn.LeakyReLU(0.2, inplace=True),
nn.Flatten(), nn.Linear(128, 10)
)
self.initialize_weights()
def forward(self, img):
out = self.model(img)
return out
def initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.xavier_normal_(m.weight.data)
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight.data, 0, 0.01)
m.bias.data.zero_()
time_begin = time.time()
print('---------------------------------------- step 1/5 : parameters preparing... ----------------------------------------')
parser = argparse.ArgumentParser()
parser.add_argument("--epochs", type=int, default=5, help="number of epochs of training")
parser.add_argument("--lr", type=float, default=0.0002, help="learning rate")
parser.add_argument("--batch_size", type=int, default=2048, help="size of the batches")
parser.add_argument("--workers", type=int, default=4, help="number of cpu threads to use during batch generation")
parser.add_argument("--dataset", type=str, default='../dataset/mnist', help="dataset root")
parser.add_argument("--result_dir", type=str, default='../result', help="dir for saving the results")
opt = parser.parse_args()
print(opt)
set_random_seed(1234, deterministic=True)
time_1 = time.time()
print('---------------------------------------- step 2/5 : data loading... ------------------------------------------------')
dataset = datasets.MNIST(opt.dataset, train=True, download=True,
transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.5], [0.5])]))
dataloader = DataLoader(dataset=dataset, batch_size=opt.batch_size, shuffle=True, num_workers=opt.workers)
time_2 = time.time()
print('---------------------------------------- step 3/5 : model defining... ----------------------------------------------')
model = Model().cuda()
printParaNum(model)
time_3 = time.time()
print('---------------------------------------- step 4/5 : requisites defining... -----------------------------------------')
# Loss function
loss_func = nn.CrossEntropyLoss()
# Optimizers
optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr, betas=(0.5, 0.999))
# NEW #
scaler = amp.GradScaler()
time_4 = time.time()
print('---------------------------------------- step 5/5 : training... ----------------------------------------------------')
f = open(opt.result_dir + '/log_' + sys.argv[0][0:-3] + '.txt', 'w')
f.write('Type: single machine, single card, mixed precision' + '\n')
f.write('Parallel manner: none' + '\n')
f.write('Mixed-precision manner: amp' + '\n')
f.write('Setting: epochs: {}, lr: {}, batch_size: {}, workers: {}'.format(opt.epochs, opt.lr, opt.batch_size, opt.workers) + '\n')
f.write('----------------------------' + '\n')
f.write('Training: ' + '\n')
f.write('----------------------------' + '\n')
time_4_dataloading = 0
time_4_computing = 0
for epoch in range(opt.epochs):
time_4_begin = time.time()
for i, (imgs, labels) in enumerate(dataloader):
imgs = imgs.cuda()
labels = labels.cuda()
time_temp = time.time()
time_4_dataloading += time_temp - time_4_begin
optimizer.zero_grad()
# new #
with amp.autocast():
pred = model(imgs)
loss = loss_func(pred, labels)
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
_, pred = torch.max(pred, 1)
acc = (pred == labels).sum().item() / len(labels)
print('Training: Epoch[{:0>3}/{:0>3}] Iteration[{:0>4}/{:0>4}] Loss: {:.4f} Acc: {:.4f}'.format(
epoch + 1, opt.epochs, i + 1, len(dataloader), loss, acc))
f.write('Training: Epoch[{:0>3}/{:0>3}] Iteration[{:0>4}/{:0>4}] Loss: {:.4f} Acc: {:.4f}'.format(
epoch + 1, opt.epochs, i + 1, len(dataloader), loss, acc) + '\n')
time_4_computing += time.time() - time_temp
time_4_begin = time.time()
time_5 = time.time()
f.write('\n')
f.write('TIME COST' + '\n')
f.write('Parameters preparing: {:.6f}(s)'.format(time_1 - time_begin) + '\n')
f.write('Data loading: {:.6f}(s)'.format(time_2 - time_1) + '\n')
f.write('Model defining: {:.6f}(s)'.format(time_3 - time_2) + '\n')
f.write('Requisites defining: {:.6f}(s)'.format(time_4 - time_3) + '\n')
f.write('Training: {:.6f}(s)'.format(time_5 - time_4) + '\n')
f.write(' Training (dataloading): {:.6f}(s)'.format(time_4_dataloading) + '\n')
f.write(' Training (computing): {:.6f}(s)'.format(time_4_computing) + '\n')
f.close()
torch.save(model.state_dict(), opt.result_dir + '/model_' + sys.argv[0][0:-3] + '.pkl')
|
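For readers who want the amp pattern in isolation, here is a minimal, self-contained sketch of the same autocast + GradScaler flow used in the script above (my own example; the tiny linear model and random batch are placeholders and a CUDA device is assumed to be available):
import torch
import torch.nn as nn
import torch.cuda.amp as amp
model = nn.Linear(10, 2).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scaler = amp.GradScaler()
x = torch.randn(4, 10, device='cuda')
y = torch.randint(0, 2, (4,), device='cuda')
optimizer.zero_grad()
with amp.autocast():                      # forward pass and loss in mixed precision
    loss = nn.functional.cross_entropy(model(x), y)
scaler.scale(loss).backward()             # scale the loss so fp16 gradients do not underflow
scaler.step(optimizer)                    # unscales gradients, then runs the optimizer step
scaler.update()                           # adapt the scale factor for the next iteration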
demo
|
Step 1: User Authorization.
Redirect the user/resource owner to the OAuth provider (i.e. Github)
using a URL with a few key OAuth parameters.
|
import os
import pathlib
from flask import Flask
from flask import request
from flask import redirect
from flask import url_for
from flask import session
from flask import render_template
from flask.json import jsonify
from td.app.auth import FlaskTDAuth
from configparser import ConfigParser
# Define the templates folder.
template_folder_path: pathlib.Path = pathlib.Path(__file__).parents[0]
template_folder_path: pathlib.Path = template_folder_path.joinpath('templates')
# Create the App.
app = Flask('TD_oAuth_App', template_folder=template_folder_path.resolve())
@app.route("/")
def home():
"""Step 1: User Authorization.
Redirect the user/resource owner to the OAuth provider (i.e. Github)
    using a URL with a few key OAuth parameters.
"""
return render_template("index.html")
# MASKED: demo function (lines 33-47)
@app.route("/login/callback", methods=["GET"])
def callback():
""" Step 3: Retrieving an access token.
The user has been redirected back from the provider to your registered
callback URL. With this redirection comes an authorization code included
in the redirect URL. We will use that to obtain an access token.
"""
# Grab the Refresh and Access Token.
token_dict = app.config['auth_client'].grab_access_token_and_refresh_token(url=request.url)
# Store it in the Session.
session['oauth_token'] = token_dict
if app.config['call_close']:
return redirect(url_for('shutdown'))
return jsonify(token_dict)
@app.route("/login/refresh", methods=["GET"])
def refresh():
# Grab the Refresh Token.
refresh_token_dict = app.config['auth_client'].grab_refresh_token()
return jsonify(refresh_token_dict)
def shutdown_server():
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
@app.route('/shutdown', methods=['POST'])
def shutdown():
shutdown_server()
return 'Server shutting down...'
def run(flask_client: FlaskTDAuth, close_after: bool = False):
certs_pem = pathlib.Path(__file__).parents[0].joinpath('certs/cert.pem')
certs_key = pathlib.Path(__file__).parents[0].joinpath('certs/key.pem')
app.secret_key = os.environ.get("SECRET_KEY") or os.urandom(24)
app.config['auth_client'] = flask_client
app.config['call_close'] = close_after
app.run(
ssl_context=(certs_pem, certs_key),
host='localhost',
port=5000,
debug=True
)
if __name__ == "__main__":
# Grab configuration values.
config = ConfigParser()
config.read('config/config.ini')
client_id = config.get('main', 'client_id')
redirect_uri = config.get('main', 'redirect_uri')
credentials = config.get('main','json_path')
# Define the Secret Key.
app.secret_key = os.environ.get("SECRET_KEY") or os.urandom(24)
# Define the App Configurations.
app.config['auth_client'] = FlaskTDAuth(
client_id=client_id,
redirect_uri=redirect_uri,
credentials_file=pathlib.Path(credentials)
)
# Run the App.
app.run(
ssl_context=('td/certs/cert.pem', 'td/certs/key.pem'),
host='localhost',
port=5000,
debug=True
)
# flask_td_app = FlaskAppTD(client_id=client_id, redirect_uri=redirect_uri, credentials_file=credentials)
# flask_td_app.run()
# This allows us to use a plain HTTP callback
# os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = "1"
# # app.run(ssl_context="adhoc")
|
@app.route("/login")
def demo():
"""Step 1: User Authorization.
Redirect the user/resource owner to the OAuth provider (i.e. Github)
    using a URL with a few key OAuth parameters.
"""
# Build the authorization URL.
auth_tuple = app.config['auth_client'].authorization_url()
# State is used to prevent CSRF, keep this for later.
session['oauth_state'] = auth_tuple[1]
return redirect(auth_tuple[0])
| 33 | 47 |
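A hedged usage sketch (my addition): the run() helper above wires a FlaskTDAuth client into the app and serves it over HTTPS with the bundled local certificates. The config keys mirror the __main__ block of the file; the config path is a placeholder, not a value taken from the original project.
import pathlib
from configparser import ConfigParser
from td.app.auth import FlaskTDAuth
config = ConfigParser()
config.read('config/config.ini')  # assumed location, same as in the __main__ block
client = FlaskTDAuth(
    client_id=config.get('main', 'client_id'),
    redirect_uri=config.get('main', 'redirect_uri'),
    credentials_file=pathlib.Path(config.get('main', 'json_path')),
)
run(flask_client=client)  # run() is defined in the module above; then visit https://localhost:5000/login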
import os
import pathlib
from flask import Flask
from flask import request
from flask import redirect
from flask import url_for
from flask import session
from flask import render_template
from flask.json import jsonify
from td.app.auth import FlaskTDAuth
from configparser import ConfigParser
# Define the templates folder.
template_folder_path: pathlib.Path = pathlib.Path(__file__).parents[0]
template_folder_path: pathlib.Path = template_folder_path.joinpath('templates')
# Create the App.
app = Flask('TD_oAuth_App', template_folder=template_folder_path.resolve())
@app.route("/")
def home():
"""Step 1: User Authorization.
Redirect the user/resource owner to the OAuth provider (i.e. Github)
    using a URL with a few key OAuth parameters.
"""
return render_template("index.html")
@app.route("/login")
def demo():
"""Step 1: User Authorization.
Redirect the user/resource owner to the OAuth provider (i.e. Github)
    using a URL with a few key OAuth parameters.
"""
# Build the authorization URL.
auth_tuple = app.config['auth_client'].authorization_url()
# State is used to prevent CSRF, keep this for later.
session['oauth_state'] = auth_tuple[1]
return redirect(auth_tuple[0])
@app.route("/login/callback", methods=["GET"])
def callback():
""" Step 3: Retrieving an access token.
The user has been redirected back from the provider to your registered
callback URL. With this redirection comes an authorization code included
in the redirect URL. We will use that to obtain an access token.
"""
# Grab the Refresh and Access Token.
token_dict = app.config['auth_client'].grab_access_token_and_refresh_token(url=request.url)
# Store it in the Session.
session['oauth_token'] = token_dict
if app.config['call_close']:
return redirect(url_for('shutdown'))
return jsonify(token_dict)
@app.route("/login/refresh", methods=["GET"])
def refresh():
# Grab the Refresh Token.
refresh_token_dict = app.config['auth_client'].grab_refresh_token()
return jsonify(refresh_token_dict)
def shutdown_server():
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
@app.route('/shutdown', methods=['POST'])
def shutdown():
shutdown_server()
return 'Server shutting down...'
def run(flask_client: FlaskTDAuth, close_after: bool = False):
certs_pem = pathlib.Path(__file__).parents[0].joinpath('certs/cert.pem')
certs_key = pathlib.Path(__file__).parents[0].joinpath('certs/key.pem')
app.secret_key = os.environ.get("SECRET_KEY") or os.urandom(24)
app.config['auth_client'] = flask_client
app.config['call_close'] = close_after
app.run(
ssl_context=(certs_pem, certs_key),
host='localhost',
port=5000,
debug=True
)
if __name__ == "__main__":
# Grab configuration values.
config = ConfigParser()
config.read('config/config.ini')
client_id = config.get('main', 'client_id')
redirect_uri = config.get('main', 'redirect_uri')
credentials = config.get('main','json_path')
# Define the Secret Key.
app.secret_key = os.environ.get("SECRET_KEY") or os.urandom(24)
# Define the App Configurations.
app.config['auth_client'] = FlaskTDAuth(
client_id=client_id,
redirect_uri=redirect_uri,
credentials_file=pathlib.Path(credentials)
)
# Run the App.
app.run(
ssl_context=('td/certs/cert.pem', 'td/certs/key.pem'),
host='localhost',
port=5000,
debug=True
)
# flask_td_app = FlaskAppTD(client_id=client_id, redirect_uri=redirect_uri, credentials_file=credentials)
# flask_td_app.run()
# This allows us to use a plain HTTP callback
# os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = "1"
# # app.run(ssl_context="adhoc")
|
_test_repr_or_str
|
Test Queue's repr or str.
fn is repr or str. expect_id is True if we expect the Queue's id to
appear in fn(Queue()).
|
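For context (an illustrative standalone snippet, not the masked helper): asyncio.Queue embeds the object id in repr() but not in str(), which is the property the docstring above describes.
import asyncio
async def main():
    q = asyncio.Queue()
    print(repr(q))  # e.g. "<Queue at 0x7f... maxsize=0>" -- contains the object id
    print(str(q))   # e.g. "<Queue maxsize=0>"            -- no id
    assert hex(id(q)) in repr(q)
    assert hex(id(q)) not in str(q)
asyncio.run(main())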
"""Tests for queues.py"""
import sys
import unittest
from unittest import mock
import asyncio
from .. import utils as test_utils
class _QueueTestBase(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = self.new_test_loop()
class QueueBasicTests(_QueueTestBase):
# MASKED: _test_repr_or_str function (lines 18-68)
def test_ctor_loop(self):
loop = mock.Mock()
q = asyncio.Queue(loop=loop)
self.assertIs(q._loop, loop)
q = asyncio.Queue(loop=self.loop)
self.assertIs(q._loop, self.loop)
def test_ctor_noloop(self):
asyncio.set_event_loop(self.loop)
q = asyncio.Queue()
self.assertIs(q._loop, self.loop)
def test_repr(self):
self._test_repr_or_str(repr, True)
def test_str(self):
self._test_repr_or_str(str, False)
def test_empty(self):
q = asyncio.Queue(loop=self.loop)
self.assertTrue(q.empty())
q.put_nowait(1)
self.assertFalse(q.empty())
self.assertEqual(1, q.get_nowait())
self.assertTrue(q.empty())
def test_full(self):
q = asyncio.Queue(loop=self.loop)
self.assertFalse(q.full())
q = asyncio.Queue(maxsize=1, loop=self.loop)
q.put_nowait(1)
self.assertTrue(q.full())
def test_order(self):
q = asyncio.Queue(loop=self.loop)
for i in [1, 3, 2]:
q.put_nowait(i)
items = [q.get_nowait() for _ in range(3)]
self.assertEqual([1, 3, 2], items)
def test_maxsize(self):
def gen():
when = yield
self.assertAlmostEqual(0.01, when)
when = yield 0.01
self.assertAlmostEqual(0.02, when)
yield 0.01
loop = self.new_test_loop(gen)
q = asyncio.Queue(maxsize=2, loop=loop)
self.assertEqual(2, q.maxsize)
have_been_put = []
@asyncio.coroutine
def putter():
for i in range(3):
yield from q.put(i)
have_been_put.append(i)
return True
@asyncio.coroutine
def test():
t = asyncio.Task(putter(), loop=loop)
yield from asyncio.sleep(0.01, loop=loop)
# The putter is blocked after putting two items.
self.assertEqual([0, 1], have_been_put)
self.assertEqual(0, q.get_nowait())
# Let the putter resume and put last item.
yield from asyncio.sleep(0.01, loop=loop)
self.assertEqual([0, 1, 2], have_been_put)
self.assertEqual(1, q.get_nowait())
self.assertEqual(2, q.get_nowait())
self.assertTrue(t.done())
self.assertTrue(t.result())
loop.run_until_complete(test())
self.assertAlmostEqual(0.02, loop.time())
class QueueGetTests(_QueueTestBase):
def test_blocking_get(self):
q = asyncio.Queue(loop=self.loop)
q.put_nowait(1)
@asyncio.coroutine
def queue_get():
return (yield from q.get())
res = self.loop.run_until_complete(queue_get())
self.assertEqual(1, res)
def test_get_with_putters(self):
q = asyncio.Queue(1, loop=self.loop)
q.put_nowait(1)
waiter = asyncio.Future(loop=self.loop)
q._putters.append(waiter)
res = self.loop.run_until_complete(q.get())
self.assertEqual(1, res)
self.assertTrue(waiter.done())
self.assertIsNone(waiter.result())
def test_blocking_get_wait(self):
def gen():
when = yield
self.assertAlmostEqual(0.01, when)
yield 0.01
loop = self.new_test_loop(gen)
q = asyncio.Queue(loop=loop)
started = asyncio.Event(loop=loop)
finished = False
@asyncio.coroutine
def queue_get():
nonlocal finished
started.set()
res = yield from q.get()
finished = True
return res
@asyncio.coroutine
def queue_put():
loop.call_later(0.01, q.put_nowait, 1)
queue_get_task = asyncio.Task(queue_get(), loop=loop)
yield from started.wait()
self.assertFalse(finished)
res = yield from queue_get_task
self.assertTrue(finished)
return res
res = loop.run_until_complete(queue_put())
self.assertEqual(1, res)
self.assertAlmostEqual(0.01, loop.time())
def test_nonblocking_get(self):
q = asyncio.Queue(loop=self.loop)
q.put_nowait(1)
self.assertEqual(1, q.get_nowait())
def test_nonblocking_get_exception(self):
q = asyncio.Queue(loop=self.loop)
self.assertRaises(asyncio.QueueEmpty, q.get_nowait)
def test_get_cancelled(self):
def gen():
when = yield
self.assertAlmostEqual(0.01, when)
when = yield 0.01
self.assertAlmostEqual(0.061, when)
yield 0.05
loop = self.new_test_loop(gen)
q = asyncio.Queue(loop=loop)
@asyncio.coroutine
def queue_get():
return (yield from asyncio.wait_for(q.get(), 0.051, loop=loop))
@asyncio.coroutine
def test():
get_task = asyncio.Task(queue_get(), loop=loop)
yield from asyncio.sleep(0.01, loop=loop) # let the task start
q.put_nowait(1)
return (yield from get_task)
self.assertEqual(1, loop.run_until_complete(test()))
self.assertAlmostEqual(0.06, loop.time())
def test_get_cancelled_race(self):
q = asyncio.Queue(loop=self.loop)
t1 = asyncio.Task(q.get(), loop=self.loop)
t2 = asyncio.Task(q.get(), loop=self.loop)
test_utils.run_briefly(self.loop)
t1.cancel()
test_utils.run_briefly(self.loop)
self.assertTrue(t1.done())
q.put_nowait('a')
test_utils.run_briefly(self.loop)
self.assertEqual(t2.result(), 'a')
def test_get_with_waiting_putters(self):
q = asyncio.Queue(loop=self.loop, maxsize=1)
asyncio.Task(q.put('a'), loop=self.loop)
asyncio.Task(q.put('b'), loop=self.loop)
test_utils.run_briefly(self.loop)
self.assertEqual(self.loop.run_until_complete(q.get()), 'a')
self.assertEqual(self.loop.run_until_complete(q.get()), 'b')
def test_why_are_getters_waiting(self):
# From issue #268.
@asyncio.coroutine
def consumer(queue, num_expected):
for _ in range(num_expected):
yield from queue.get()
@asyncio.coroutine
def producer(queue, num_items):
for i in range(num_items):
yield from queue.put(i)
queue_size = 1
producer_num_items = 5
q = asyncio.Queue(queue_size, loop=self.loop)
self.loop.run_until_complete(
asyncio.gather(
producer(q, producer_num_items), consumer(q, producer_num_items), loop=self.loop
),
)
@unittest.skipIf(sys.version_info < (3, 6, 4), "Changed in 3.6.4")
def test_cancelled_getters_not_being_held_in_self_getters(self):
def a_generator():
yield 0.1
yield 0.2
self.loop = self.new_test_loop(a_generator)
@asyncio.coroutine
def consumer(queue):
try:
yield from asyncio.wait_for(queue.get(), 0.1, loop=self.loop)
except asyncio.TimeoutError:
pass
queue = asyncio.Queue(loop=self.loop, maxsize=5)
self.loop.run_until_complete(self.loop.create_task(consumer(queue)))
self.assertEqual(len(queue._getters), 0)
class QueuePutTests(_QueueTestBase):
def test_blocking_put(self):
q = asyncio.Queue(loop=self.loop)
@asyncio.coroutine
def queue_put():
# No maxsize, won't block.
yield from q.put(1)
self.loop.run_until_complete(queue_put())
def test_blocking_put_wait(self):
def gen():
when = yield
self.assertAlmostEqual(0.01, when)
yield 0.01
loop = self.new_test_loop(gen)
q = asyncio.Queue(maxsize=1, loop=loop)
started = asyncio.Event(loop=loop)
finished = False
@asyncio.coroutine
def queue_put():
nonlocal finished
started.set()
yield from q.put(1)
yield from q.put(2)
finished = True
@asyncio.coroutine
def queue_get():
loop.call_later(0.01, q.get_nowait)
queue_put_task = asyncio.Task(queue_put(), loop=loop)
yield from started.wait()
self.assertFalse(finished)
yield from queue_put_task
self.assertTrue(finished)
loop.run_until_complete(queue_get())
self.assertAlmostEqual(0.01, loop.time())
def test_nonblocking_put(self):
q = asyncio.Queue(loop=self.loop)
q.put_nowait(1)
self.assertEqual(1, q.get_nowait())
def test_get_cancel_drop_one_pending_reader(self):
def gen():
yield 0.01
yield 0.1
loop = self.new_test_loop(gen)
q = asyncio.Queue(loop=loop)
reader = loop.create_task(q.get())
loop.run_until_complete(asyncio.sleep(0.01, loop=loop))
q.put_nowait(1)
q.put_nowait(2)
reader.cancel()
try:
loop.run_until_complete(reader)
except asyncio.CancelledError:
# try again
reader = loop.create_task(q.get())
loop.run_until_complete(reader)
result = reader.result()
# if we get 2, it means 1 got dropped!
self.assertEqual(1, result)
def test_get_cancel_drop_many_pending_readers(self):
def gen():
yield 0.01
yield 0.1
loop = self.new_test_loop(gen)
loop.set_debug(True)
q = asyncio.Queue(loop=loop)
reader1 = loop.create_task(q.get())
reader2 = loop.create_task(q.get())
reader3 = loop.create_task(q.get())
loop.run_until_complete(asyncio.sleep(0.01, loop=loop))
q.put_nowait(1)
q.put_nowait(2)
reader1.cancel()
try:
loop.run_until_complete(reader1)
except asyncio.CancelledError:
pass
loop.run_until_complete(reader3)
# It is undefined in which order concurrent readers receive results.
self.assertEqual({reader2.result(), reader3.result()}, {1, 2})
def test_put_cancel_drop(self):
def gen():
yield 0.01
yield 0.1
loop = self.new_test_loop(gen)
q = asyncio.Queue(1, loop=loop)
q.put_nowait(1)
# putting a second item in the queue has to block (qsize=1)
writer = loop.create_task(q.put(2))
loop.run_until_complete(asyncio.sleep(0.01, loop=loop))
value1 = q.get_nowait()
self.assertEqual(value1, 1)
writer.cancel()
try:
loop.run_until_complete(writer)
except asyncio.CancelledError:
# try again
writer = loop.create_task(q.put(2))
loop.run_until_complete(writer)
value2 = q.get_nowait()
self.assertEqual(value2, 2)
self.assertEqual(q.qsize(), 0)
def test_nonblocking_put_exception(self):
q = asyncio.Queue(maxsize=1, loop=self.loop)
q.put_nowait(1)
self.assertRaises(asyncio.QueueFull, q.put_nowait, 2)
def test_float_maxsize(self):
q = asyncio.Queue(maxsize=1.3, loop=self.loop)
q.put_nowait(1)
q.put_nowait(2)
self.assertTrue(q.full())
self.assertRaises(asyncio.QueueFull, q.put_nowait, 3)
q = asyncio.Queue(maxsize=1.3, loop=self.loop)
@asyncio.coroutine
def queue_put():
yield from q.put(1)
yield from q.put(2)
self.assertTrue(q.full())
self.loop.run_until_complete(queue_put())
def test_put_cancelled(self):
q = asyncio.Queue(loop=self.loop)
@asyncio.coroutine
def queue_put():
yield from q.put(1)
return True
@asyncio.coroutine
def test():
return (yield from q.get())
t = asyncio.Task(queue_put(), loop=self.loop)
self.assertEqual(1, self.loop.run_until_complete(test()))
self.assertTrue(t.done())
self.assertTrue(t.result())
def test_put_cancelled_race(self):
q = asyncio.Queue(loop=self.loop, maxsize=1)
put_a = asyncio.Task(q.put('a'), loop=self.loop)
put_b = asyncio.Task(q.put('b'), loop=self.loop)
put_c = asyncio.Task(q.put('X'), loop=self.loop)
test_utils.run_briefly(self.loop)
self.assertTrue(put_a.done())
self.assertFalse(put_b.done())
put_c.cancel()
test_utils.run_briefly(self.loop)
self.assertTrue(put_c.done())
self.assertEqual(q.get_nowait(), 'a')
test_utils.run_briefly(self.loop)
self.assertEqual(q.get_nowait(), 'b')
self.loop.run_until_complete(put_b)
def test_put_with_waiting_getters(self):
q = asyncio.Queue(loop=self.loop)
t = asyncio.Task(q.get(), loop=self.loop)
test_utils.run_briefly(self.loop)
self.loop.run_until_complete(q.put('a'))
self.assertEqual(self.loop.run_until_complete(t), 'a')
def test_why_are_putters_waiting(self):
# From issue #265.
queue = asyncio.Queue(2, loop=self.loop)
@asyncio.coroutine
def putter(item):
yield from queue.put(item)
@asyncio.coroutine
def getter():
yield
num = queue.qsize()
for _ in range(num):
queue.get_nowait()
t0 = putter(0)
t1 = putter(1)
t2 = putter(2)
t3 = putter(3)
self.loop.run_until_complete(asyncio.gather(getter(), t0, t1, t2, t3, loop=self.loop))
class LifoQueueTests(_QueueTestBase):
def test_order(self):
q = asyncio.LifoQueue(loop=self.loop)
for i in [1, 3, 2]:
q.put_nowait(i)
items = [q.get_nowait() for _ in range(3)]
self.assertEqual([2, 3, 1], items)
class PriorityQueueTests(_QueueTestBase):
def test_order(self):
q = asyncio.PriorityQueue(loop=self.loop)
for i in [1, 3, 2]:
q.put_nowait(i)
items = [q.get_nowait() for _ in range(3)]
self.assertEqual([1, 2, 3], items)
class _QueueJoinTestMixin:
q_class = None
def test_task_done_underflow(self):
q = self.q_class(loop=self.loop)
self.assertRaises(ValueError, q.task_done)
def test_task_done(self):
q = self.q_class(loop=self.loop)
for i in range(100):
q.put_nowait(i)
accumulator = 0
# Two workers get items from the queue and call task_done after each.
# Join the queue and assert all items have been processed.
running = True
@asyncio.coroutine
def worker():
nonlocal accumulator
while running:
item = yield from q.get()
accumulator += item
q.task_done()
@asyncio.coroutine
def test():
tasks = [asyncio.Task(worker(), loop=self.loop) for index in range(2)]
yield from q.join()
return tasks
tasks = self.loop.run_until_complete(test())
self.assertEqual(sum(range(100)), accumulator)
# close running generators
running = False
for i in range(len(tasks)):
q.put_nowait(0)
self.loop.run_until_complete(asyncio.wait(tasks, loop=self.loop))
def test_join_empty_queue(self):
q = self.q_class(loop=self.loop)
# Test that a queue join()s successfully, and before anything else
# (done twice for insurance).
@asyncio.coroutine
def join():
yield from q.join()
yield from q.join()
self.loop.run_until_complete(join())
def test_format(self):
q = self.q_class(loop=self.loop)
self.assertEqual(q._format(), 'maxsize=0')
q._unfinished_tasks = 2
self.assertEqual(q._format(), 'maxsize=0 tasks=2')
class QueueJoinTests(_QueueJoinTestMixin, _QueueTestBase):
q_class = asyncio.Queue
class LifoQueueJoinTests(_QueueJoinTestMixin, _QueueTestBase):
q_class = asyncio.LifoQueue
class PriorityQueueJoinTests(_QueueJoinTestMixin, _QueueTestBase):
q_class = asyncio.PriorityQueue
if __name__ == '__main__':
unittest.main()
|
def _test_repr_or_str(self, fn, expect_id):
"""Test Queue's repr or str.
fn is repr or str. expect_id is True if we expect the Queue's id to
appear in fn(Queue()).
"""
def gen():
when = yield
self.assertAlmostEqual(0.1, when)
when = yield 0.1
self.assertAlmostEqual(0.2, when)
yield 0.1
loop = self.new_test_loop(gen)
q = asyncio.Queue(loop=loop)
self.assertTrue(fn(q).startswith('<Queue'), fn(q))
id_is_present = hex(id(q)) in fn(q)
self.assertEqual(expect_id, id_is_present)
@asyncio.coroutine
def add_getter():
q = asyncio.Queue(loop=loop)
# Start a task that waits to get.
asyncio.Task(q.get(), loop=loop)
# Let it start waiting.
yield from asyncio.sleep(0.1, loop=loop)
self.assertTrue('_getters[1]' in fn(q))
# resume q.get coroutine to finish generator
q.put_nowait(0)
loop.run_until_complete(add_getter())
@asyncio.coroutine
def add_putter():
q = asyncio.Queue(maxsize=1, loop=loop)
q.put_nowait(1)
# Start a task that waits to put.
asyncio.Task(q.put(2), loop=loop)
# Let it start waiting.
yield from asyncio.sleep(0.1, loop=loop)
self.assertTrue('_putters[1]' in fn(q))
# resume q.put coroutine to finish generator
q.get_nowait()
loop.run_until_complete(add_putter())
q = asyncio.Queue(loop=loop)
q.put_nowait(1)
self.assertTrue('_queue=[1]' in fn(q))
| 18 | 68 |
"""Tests for queues.py"""
import sys
import unittest
from unittest import mock
import asyncio
from .. import utils as test_utils
class _QueueTestBase(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = self.new_test_loop()
class QueueBasicTests(_QueueTestBase):
def _test_repr_or_str(self, fn, expect_id):
"""Test Queue's repr or str.
fn is repr or str. expect_id is True if we expect the Queue's id to
appear in fn(Queue()).
"""
def gen():
when = yield
self.assertAlmostEqual(0.1, when)
when = yield 0.1
self.assertAlmostEqual(0.2, when)
yield 0.1
loop = self.new_test_loop(gen)
q = asyncio.Queue(loop=loop)
self.assertTrue(fn(q).startswith('<Queue'), fn(q))
id_is_present = hex(id(q)) in fn(q)
self.assertEqual(expect_id, id_is_present)
@asyncio.coroutine
def add_getter():
q = asyncio.Queue(loop=loop)
# Start a task that waits to get.
asyncio.Task(q.get(), loop=loop)
# Let it start waiting.
yield from asyncio.sleep(0.1, loop=loop)
self.assertTrue('_getters[1]' in fn(q))
# resume q.get coroutine to finish generator
q.put_nowait(0)
loop.run_until_complete(add_getter())
@asyncio.coroutine
def add_putter():
q = asyncio.Queue(maxsize=1, loop=loop)
q.put_nowait(1)
# Start a task that waits to put.
asyncio.Task(q.put(2), loop=loop)
# Let it start waiting.
yield from asyncio.sleep(0.1, loop=loop)
self.assertTrue('_putters[1]' in fn(q))
# resume q.put coroutine to finish generator
q.get_nowait()
loop.run_until_complete(add_putter())
q = asyncio.Queue(loop=loop)
q.put_nowait(1)
self.assertTrue('_queue=[1]' in fn(q))
def test_ctor_loop(self):
loop = mock.Mock()
q = asyncio.Queue(loop=loop)
self.assertIs(q._loop, loop)
q = asyncio.Queue(loop=self.loop)
self.assertIs(q._loop, self.loop)
def test_ctor_noloop(self):
asyncio.set_event_loop(self.loop)
q = asyncio.Queue()
self.assertIs(q._loop, self.loop)
def test_repr(self):
self._test_repr_or_str(repr, True)
def test_str(self):
self._test_repr_or_str(str, False)
def test_empty(self):
q = asyncio.Queue(loop=self.loop)
self.assertTrue(q.empty())
q.put_nowait(1)
self.assertFalse(q.empty())
self.assertEqual(1, q.get_nowait())
self.assertTrue(q.empty())
def test_full(self):
q = asyncio.Queue(loop=self.loop)
self.assertFalse(q.full())
q = asyncio.Queue(maxsize=1, loop=self.loop)
q.put_nowait(1)
self.assertTrue(q.full())
def test_order(self):
q = asyncio.Queue(loop=self.loop)
for i in [1, 3, 2]:
q.put_nowait(i)
items = [q.get_nowait() for _ in range(3)]
self.assertEqual([1, 3, 2], items)
def test_maxsize(self):
def gen():
when = yield
self.assertAlmostEqual(0.01, when)
when = yield 0.01
self.assertAlmostEqual(0.02, when)
yield 0.01
loop = self.new_test_loop(gen)
q = asyncio.Queue(maxsize=2, loop=loop)
self.assertEqual(2, q.maxsize)
have_been_put = []
@asyncio.coroutine
def putter():
for i in range(3):
yield from q.put(i)
have_been_put.append(i)
return True
@asyncio.coroutine
def test():
t = asyncio.Task(putter(), loop=loop)
yield from asyncio.sleep(0.01, loop=loop)
# The putter is blocked after putting two items.
self.assertEqual([0, 1], have_been_put)
self.assertEqual(0, q.get_nowait())
# Let the putter resume and put last item.
yield from asyncio.sleep(0.01, loop=loop)
self.assertEqual([0, 1, 2], have_been_put)
self.assertEqual(1, q.get_nowait())
self.assertEqual(2, q.get_nowait())
self.assertTrue(t.done())
self.assertTrue(t.result())
loop.run_until_complete(test())
self.assertAlmostEqual(0.02, loop.time())
class QueueGetTests(_QueueTestBase):
def test_blocking_get(self):
q = asyncio.Queue(loop=self.loop)
q.put_nowait(1)
@asyncio.coroutine
def queue_get():
return (yield from q.get())
res = self.loop.run_until_complete(queue_get())
self.assertEqual(1, res)
def test_get_with_putters(self):
q = asyncio.Queue(1, loop=self.loop)
q.put_nowait(1)
waiter = asyncio.Future(loop=self.loop)
q._putters.append(waiter)
res = self.loop.run_until_complete(q.get())
self.assertEqual(1, res)
self.assertTrue(waiter.done())
self.assertIsNone(waiter.result())
def test_blocking_get_wait(self):
def gen():
when = yield
self.assertAlmostEqual(0.01, when)
yield 0.01
loop = self.new_test_loop(gen)
q = asyncio.Queue(loop=loop)
started = asyncio.Event(loop=loop)
finished = False
@asyncio.coroutine
def queue_get():
nonlocal finished
started.set()
res = yield from q.get()
finished = True
return res
@asyncio.coroutine
def queue_put():
loop.call_later(0.01, q.put_nowait, 1)
queue_get_task = asyncio.Task(queue_get(), loop=loop)
yield from started.wait()
self.assertFalse(finished)
res = yield from queue_get_task
self.assertTrue(finished)
return res
res = loop.run_until_complete(queue_put())
self.assertEqual(1, res)
self.assertAlmostEqual(0.01, loop.time())
def test_nonblocking_get(self):
q = asyncio.Queue(loop=self.loop)
q.put_nowait(1)
self.assertEqual(1, q.get_nowait())
def test_nonblocking_get_exception(self):
q = asyncio.Queue(loop=self.loop)
self.assertRaises(asyncio.QueueEmpty, q.get_nowait)
def test_get_cancelled(self):
def gen():
when = yield
self.assertAlmostEqual(0.01, when)
when = yield 0.01
self.assertAlmostEqual(0.061, when)
yield 0.05
loop = self.new_test_loop(gen)
q = asyncio.Queue(loop=loop)
@asyncio.coroutine
def queue_get():
return (yield from asyncio.wait_for(q.get(), 0.051, loop=loop))
@asyncio.coroutine
def test():
get_task = asyncio.Task(queue_get(), loop=loop)
yield from asyncio.sleep(0.01, loop=loop) # let the task start
q.put_nowait(1)
return (yield from get_task)
self.assertEqual(1, loop.run_until_complete(test()))
self.assertAlmostEqual(0.06, loop.time())
def test_get_cancelled_race(self):
q = asyncio.Queue(loop=self.loop)
t1 = asyncio.Task(q.get(), loop=self.loop)
t2 = asyncio.Task(q.get(), loop=self.loop)
test_utils.run_briefly(self.loop)
t1.cancel()
test_utils.run_briefly(self.loop)
self.assertTrue(t1.done())
q.put_nowait('a')
test_utils.run_briefly(self.loop)
self.assertEqual(t2.result(), 'a')
def test_get_with_waiting_putters(self):
q = asyncio.Queue(loop=self.loop, maxsize=1)
asyncio.Task(q.put('a'), loop=self.loop)
asyncio.Task(q.put('b'), loop=self.loop)
test_utils.run_briefly(self.loop)
self.assertEqual(self.loop.run_until_complete(q.get()), 'a')
self.assertEqual(self.loop.run_until_complete(q.get()), 'b')
def test_why_are_getters_waiting(self):
# From issue #268.
@asyncio.coroutine
def consumer(queue, num_expected):
for _ in range(num_expected):
yield from queue.get()
@asyncio.coroutine
def producer(queue, num_items):
for i in range(num_items):
yield from queue.put(i)
queue_size = 1
producer_num_items = 5
q = asyncio.Queue(queue_size, loop=self.loop)
self.loop.run_until_complete(
asyncio.gather(
producer(q, producer_num_items), consumer(q, producer_num_items), loop=self.loop
),
)
@unittest.skipIf(sys.version_info < (3, 6, 4), "Changed in 3.6.4")
def test_cancelled_getters_not_being_held_in_self_getters(self):
def a_generator():
yield 0.1
yield 0.2
self.loop = self.new_test_loop(a_generator)
@asyncio.coroutine
def consumer(queue):
try:
yield from asyncio.wait_for(queue.get(), 0.1, loop=self.loop)
except asyncio.TimeoutError:
pass
queue = asyncio.Queue(loop=self.loop, maxsize=5)
self.loop.run_until_complete(self.loop.create_task(consumer(queue)))
self.assertEqual(len(queue._getters), 0)
class QueuePutTests(_QueueTestBase):
def test_blocking_put(self):
q = asyncio.Queue(loop=self.loop)
@asyncio.coroutine
def queue_put():
# No maxsize, won't block.
yield from q.put(1)
self.loop.run_until_complete(queue_put())
def test_blocking_put_wait(self):
def gen():
when = yield
self.assertAlmostEqual(0.01, when)
yield 0.01
loop = self.new_test_loop(gen)
q = asyncio.Queue(maxsize=1, loop=loop)
started = asyncio.Event(loop=loop)
finished = False
@asyncio.coroutine
def queue_put():
nonlocal finished
started.set()
yield from q.put(1)
yield from q.put(2)
finished = True
@asyncio.coroutine
def queue_get():
loop.call_later(0.01, q.get_nowait)
queue_put_task = asyncio.Task(queue_put(), loop=loop)
yield from started.wait()
self.assertFalse(finished)
yield from queue_put_task
self.assertTrue(finished)
loop.run_until_complete(queue_get())
self.assertAlmostEqual(0.01, loop.time())
def test_nonblocking_put(self):
q = asyncio.Queue(loop=self.loop)
q.put_nowait(1)
self.assertEqual(1, q.get_nowait())
def test_get_cancel_drop_one_pending_reader(self):
def gen():
yield 0.01
yield 0.1
loop = self.new_test_loop(gen)
q = asyncio.Queue(loop=loop)
reader = loop.create_task(q.get())
loop.run_until_complete(asyncio.sleep(0.01, loop=loop))
q.put_nowait(1)
q.put_nowait(2)
reader.cancel()
try:
loop.run_until_complete(reader)
except asyncio.CancelledError:
# try again
reader = loop.create_task(q.get())
loop.run_until_complete(reader)
result = reader.result()
# if we get 2, it means 1 got dropped!
self.assertEqual(1, result)
def test_get_cancel_drop_many_pending_readers(self):
def gen():
yield 0.01
yield 0.1
loop = self.new_test_loop(gen)
loop.set_debug(True)
q = asyncio.Queue(loop=loop)
reader1 = loop.create_task(q.get())
reader2 = loop.create_task(q.get())
reader3 = loop.create_task(q.get())
loop.run_until_complete(asyncio.sleep(0.01, loop=loop))
q.put_nowait(1)
q.put_nowait(2)
reader1.cancel()
try:
loop.run_until_complete(reader1)
except asyncio.CancelledError:
pass
loop.run_until_complete(reader3)
# It is undefined in which order concurrent readers receive results.
self.assertEqual({reader2.result(), reader3.result()}, {1, 2})
def test_put_cancel_drop(self):
def gen():
yield 0.01
yield 0.1
loop = self.new_test_loop(gen)
q = asyncio.Queue(1, loop=loop)
q.put_nowait(1)
# putting a second item in the queue has to block (qsize=1)
writer = loop.create_task(q.put(2))
loop.run_until_complete(asyncio.sleep(0.01, loop=loop))
value1 = q.get_nowait()
self.assertEqual(value1, 1)
writer.cancel()
try:
loop.run_until_complete(writer)
except asyncio.CancelledError:
# try again
writer = loop.create_task(q.put(2))
loop.run_until_complete(writer)
value2 = q.get_nowait()
self.assertEqual(value2, 2)
self.assertEqual(q.qsize(), 0)
def test_nonblocking_put_exception(self):
q = asyncio.Queue(maxsize=1, loop=self.loop)
q.put_nowait(1)
self.assertRaises(asyncio.QueueFull, q.put_nowait, 2)
def test_float_maxsize(self):
q = asyncio.Queue(maxsize=1.3, loop=self.loop)
q.put_nowait(1)
q.put_nowait(2)
self.assertTrue(q.full())
self.assertRaises(asyncio.QueueFull, q.put_nowait, 3)
q = asyncio.Queue(maxsize=1.3, loop=self.loop)
@asyncio.coroutine
def queue_put():
yield from q.put(1)
yield from q.put(2)
self.assertTrue(q.full())
self.loop.run_until_complete(queue_put())
def test_put_cancelled(self):
q = asyncio.Queue(loop=self.loop)
@asyncio.coroutine
def queue_put():
yield from q.put(1)
return True
@asyncio.coroutine
def test():
return (yield from q.get())
t = asyncio.Task(queue_put(), loop=self.loop)
self.assertEqual(1, self.loop.run_until_complete(test()))
self.assertTrue(t.done())
self.assertTrue(t.result())
def test_put_cancelled_race(self):
q = asyncio.Queue(loop=self.loop, maxsize=1)
put_a = asyncio.Task(q.put('a'), loop=self.loop)
put_b = asyncio.Task(q.put('b'), loop=self.loop)
put_c = asyncio.Task(q.put('X'), loop=self.loop)
test_utils.run_briefly(self.loop)
self.assertTrue(put_a.done())
self.assertFalse(put_b.done())
put_c.cancel()
test_utils.run_briefly(self.loop)
self.assertTrue(put_c.done())
self.assertEqual(q.get_nowait(), 'a')
test_utils.run_briefly(self.loop)
self.assertEqual(q.get_nowait(), 'b')
self.loop.run_until_complete(put_b)
def test_put_with_waiting_getters(self):
q = asyncio.Queue(loop=self.loop)
t = asyncio.Task(q.get(), loop=self.loop)
test_utils.run_briefly(self.loop)
self.loop.run_until_complete(q.put('a'))
self.assertEqual(self.loop.run_until_complete(t), 'a')
def test_why_are_putters_waiting(self):
# From issue #265.
queue = asyncio.Queue(2, loop=self.loop)
@asyncio.coroutine
def putter(item):
yield from queue.put(item)
@asyncio.coroutine
def getter():
yield
num = queue.qsize()
for _ in range(num):
queue.get_nowait()
t0 = putter(0)
t1 = putter(1)
t2 = putter(2)
t3 = putter(3)
self.loop.run_until_complete(asyncio.gather(getter(), t0, t1, t2, t3, loop=self.loop))
class LifoQueueTests(_QueueTestBase):
def test_order(self):
q = asyncio.LifoQueue(loop=self.loop)
for i in [1, 3, 2]:
q.put_nowait(i)
items = [q.get_nowait() for _ in range(3)]
self.assertEqual([2, 3, 1], items)
class PriorityQueueTests(_QueueTestBase):
def test_order(self):
q = asyncio.PriorityQueue(loop=self.loop)
for i in [1, 3, 2]:
q.put_nowait(i)
items = [q.get_nowait() for _ in range(3)]
self.assertEqual([1, 2, 3], items)
class _QueueJoinTestMixin:
q_class = None
def test_task_done_underflow(self):
q = self.q_class(loop=self.loop)
self.assertRaises(ValueError, q.task_done)
def test_task_done(self):
q = self.q_class(loop=self.loop)
for i in range(100):
q.put_nowait(i)
accumulator = 0
# Two workers get items from the queue and call task_done after each.
# Join the queue and assert all items have been processed.
running = True
@asyncio.coroutine
def worker():
nonlocal accumulator
while running:
item = yield from q.get()
accumulator += item
q.task_done()
@asyncio.coroutine
def test():
tasks = [asyncio.Task(worker(), loop=self.loop) for index in range(2)]
yield from q.join()
return tasks
tasks = self.loop.run_until_complete(test())
self.assertEqual(sum(range(100)), accumulator)
# close running generators
running = False
for i in range(len(tasks)):
q.put_nowait(0)
self.loop.run_until_complete(asyncio.wait(tasks, loop=self.loop))
def test_join_empty_queue(self):
q = self.q_class(loop=self.loop)
# Test that a queue join()s successfully, and before anything else
# (done twice for insurance).
@asyncio.coroutine
def join():
yield from q.join()
yield from q.join()
self.loop.run_until_complete(join())
def test_format(self):
q = self.q_class(loop=self.loop)
self.assertEqual(q._format(), 'maxsize=0')
q._unfinished_tasks = 2
self.assertEqual(q._format(), 'maxsize=0 tasks=2')
class QueueJoinTests(_QueueJoinTestMixin, _QueueTestBase):
q_class = asyncio.Queue
class LifoQueueJoinTests(_QueueJoinTestMixin, _QueueTestBase):
q_class = asyncio.LifoQueue
class PriorityQueueJoinTests(_QueueJoinTestMixin, _QueueTestBase):
q_class = asyncio.PriorityQueue
if __name__ == '__main__':
unittest.main()
|
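The queue tests above use the legacy @asyncio.coroutine / explicit loop= style that predates Python 3.8. As a stand-alone, hedged sketch (not part of the test suite), the same blocking put/get behaviour can be exercised with the modern async/await API; the maxsize and item count below are arbitrary choices.
import asyncio
async def produce(queue, n):
    for i in range(n):
        await queue.put(i)  # blocks whenever the queue is full (maxsize=1)
async def consume(queue, n):
    for _ in range(n):
        await queue.get()  # wakes a blocked producer, if any
        queue.task_done()
async def main():
    queue = asyncio.Queue(maxsize=1)
    await asyncio.gather(produce(queue, 5), consume(queue, 5))
    await queue.join()  # returns immediately: every item has been marked done
asyncio.run(main())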
get_git_version
|
Gets application version in the format [last-tag]-[last-commit-sha].
:param strip_v_in_version: If the version tag starts with 'v' (like 'v1.2.3'),
this chooses if the 'v' should be stripped, so the resulting tag is '1.2.3'.
If there's a "-", "." or "_" separator after "v", it is removed as well.
:return: The version string
|
"""Module with git related utilities."""
import git
class GitRepoVersionInfo:
"""
Provides application version information based on the tags and commits in the repo.
"""
def __init__(self, path: str):
"""
Create an instance of GitRepoVersionInfo
:param path: The path to search for git information. It searches for '.git' in this folder or any parent
folder.
"""
self._is_repo = False
try:
self._repo = git.Repo(path, search_parent_directories=True)
self._is_repo = True
except git.exc.InvalidGitRepositoryError:
self._repo = None
@property
def is_git_repo(self) -> bool:
"""
Checks if the path given in the constructor lies inside a valid git repo.
:return: True if a repo was found.
"""
return self._is_repo
# MASKED: get_git_version function (lines 31-52)
|
def get_git_version(self, strip_v_in_version: bool = True) -> str:
"""
Gets application version in the format [last-tag]-[last-commit-sha].
:param strip_v_in_version: If the version tag starts with 'v' (like 'v1.2.3'),
this chooses if the 'v' should be stripped, so the resulting tag is '1.2.3'.
If there's a "-", "." or "_" separator after "v", it is removed as well.
:return: The version string
"""
if not self._is_repo:
raise git.exc.InvalidGitRepositoryError()
tags = sorted(self._repo.tags, key=lambda t: t.commit.committed_date)
latest_tag = None if len(tags) == 0 else tags[-1]
ver = "0.0.0" if latest_tag is None else latest_tag.name
if strip_v_in_version and ver.startswith("v"):
txt_ver = ver.lstrip("v")
txt_ver = txt_ver.lstrip("-_.")
else:
txt_ver = ver
sha = self._repo.head.commit.hexsha
if latest_tag is not None and sha == latest_tag.commit.hexsha:
return txt_ver
return f"{txt_ver}-{sha}"
| 31 | 52 |
"""Module with git related utilities."""
import git
class GitRepoVersionInfo:
"""
Provides application version information based on the tags and commits in the repo.
"""
def __init__(self, path: str):
"""
Create an instance of GitRepoVersionInfo
:param path: The path to search for git information. It searches for '.git' in this folder or any parent
folder.
"""
self._is_repo = False
try:
self._repo = git.Repo(path, search_parent_directories=True)
self._is_repo = True
except git.exc.InvalidGitRepositoryError:
self._repo = None
@property
def is_git_repo(self) -> bool:
"""
Checks if the path given in the constructor lies inside a valid git repo.
:return: True if a repo was found.
"""
return self._is_repo
def get_git_version(self, strip_v_in_version: bool = True) -> str:
"""
Gets application version in the format [last-tag]-[last-commit-sha].
:param strip_v_in_version: If the version tag starts with 'v' (like 'v1.2.3'),
this chooses if the 'v' should be stripped, so the resulting tag is '1.2.3'.
If there's a "-", "." or "_" separator after "v", it is removed as well.
:return: The version string
"""
if not self._is_repo:
raise git.exc.InvalidGitRepositoryError()
tags = sorted(self._repo.tags, key=lambda t: t.commit.committed_date)
latest_tag = None if len(tags) == 0 else tags[-1]
ver = "0.0.0" if latest_tag is None else latest_tag.name
if strip_v_in_version and ver.startswith("v"):
txt_ver = ver.lstrip("v")
txt_ver = txt_ver.lstrip("-_.")
else:
txt_ver = ver
sha = self._repo.head.commit.hexsha
if latest_tag is not None and sha == latest_tag.commit.hexsha:
return txt_ver
return f"{txt_ver}-{sha}"
|
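A minimal usage sketch for the module above, assuming GitPython is installed and the module is saved as git_version.py (that file name is a guess; it is not given in the source).
# The import path is hypothetical; point it at wherever GitRepoVersionInfo lives.
from git_version import GitRepoVersionInfo
info = GitRepoVersionInfo(".")
if info.is_git_repo:
    # Prints e.g. "1.2.3" when HEAD carries the latest tag, or "1.2.3-<sha>" otherwise.
    print(info.get_git_version(strip_v_in_version=True))
else:
    print("not inside a git repository")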
test
|
Tests the model.
Parameters:
env - the environment to test the policy on
actor_model - the actor model to load in
is_discrete - whether the environment uses a discrete action space
Return:
None
|
"""
The file contains the PPO class to train with.
NOTE: All "ALG STEP"s are following the numbers from the original PPO pseudocode.
It can be found here: https://spinningup.openai.com/en/latest/_images/math/e62a8971472597f4b014c2da064f636ffe365ba3.svg
"""
import gym
import numpy as np
import torch
import torch.nn as nn
from torch.optim import Adam
#For continuous actions
from torch.distributions import MultivariateNormal
#For discrete action_space
from torch.distributions import Categorical
from network import FeedForwardActorNN, FeedForwardCriticNN
import sys
from cbf_clf_helper import clf_control, cbf_control
#Integrating tensorboard
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter()
class PPO:
"""
This is the PPO class we will use as our model in main.py
"""
def __init__(self, env, **hyperparameters):
"""
Initializes the PPO model, including hyperparameters.
Parameters:
policy_class - the policy class to use for our actor/critic networks.
env - the environment to train on.
hyperparameters - all extra arguments passed into PPO that should be hyperparameters.
Returns:
None
"""
# Make sure the environment is compatible with our code
assert(type(env.observation_space) == gym.spaces.Box)
# The matching action-space assert is left commented out so discrete action spaces can also be used:
# assert(type(env.action_space) == gym.spaces.Box)
# Initialize hyperparameters for training with PPO
self._init_hyperparameters(hyperparameters)
# Extract environment information
self.env = env
self.obs_dim = env.observation_space.shape[0]
if self.discrete:
self.act_dim = env.action_space.n
else:
self.act_dim = env.action_space.shape[0] #env.action_space.n #env.action_space.shape[0]
# Initialize actor and critic networks
self.actor = FeedForwardActorNN(self.obs_dim, self.act_dim,self.discrete)
actor_model = 'ppo_actorKinematicBicycleGymLane.pth'
policy = FeedForwardActorNN(5, 2,False)
policy.load_state_dict(torch.load(actor_model))
actor_model = policy
#print(f'model =========== {self.actor}') # ALG STEP 1
self.critic = FeedForwardCriticNN(self.obs_dim, 1)
#print(f'critic =========== {self.critic}')
# Initialize optimizers for actor and critic
self.actor_optim = Adam(self.actor.parameters(), lr=self.lr)
self.critic_optim = Adam(self.critic.parameters(), lr=self.lr)
# Initialize the covariance matrix used to query the actor for actions
self.cov_var = torch.full(size=(self.act_dim,), fill_value=0.05)
self.cov_mat = torch.diag(self.cov_var)
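# Illustration (added comment, not original code): for a 2-dimensional action
# space the two lines above give cov_var = tensor([0.05, 0.05]) and
# cov_mat = [[0.05, 0.0], [0.0, 0.05]], i.e. a fixed, state-independent
# exploration std of sqrt(0.05) ~= 0.22 per action dimension.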
self.obs_count = 0
self.index_count = 0
# This logger will help us with printing out summaries of each iteration
self.logger = {
't_so_far': 0, # timesteps so far
'i_so_far': 0, # iterations so far
'batch_lens': [], # episodic lengths in batch
'batch_rews': [], # episodic returns in batch
'batch_infractions': [], # episodic infraction counts in batch
'actor_losses': [], # losses of actor network in current iteration
'actor_network' : 0, # Actor network
}
def learn(self, env_name,failure_observations,subpolicy):
"""
Train the actor and critic networks. Here is where the main PPO algorithm resides.
Parameters:
total_timesteps - the total number of timesteps to train for
Return:
None
"""
print(f"Learning... Running {self.max_timesteps_per_episode} timesteps per episode, ", end='')
print(f"{self.timesteps_per_batch} timesteps per batch for a total of {self.training_step} iterations")
t_so_far = 0 # Timesteps simulated so far
i_so_far = 0 # Iterations ran so far
while i_so_far < self.training_step: # ALG STEP 2
# Autobots, roll out (just kidding, we're collecting our batch simulations here)
batch_obs, batch_acts, batch_log_probs, batch_rtgs, batch_lens = self.rollout(subpolicy,failure_observations) # ALG STEP 3
# Calculate how many timesteps we collected this batch
t_so_far += np.sum(batch_lens)
# Increment the number of iterations
i_so_far += 1
# Logging timesteps so far and iterations so far
self.logger['t_so_far'] = t_so_far
self.logger['i_so_far'] = i_so_far
# Calculate advantage at k-th iteration
V, _ = self.evaluate(batch_obs, batch_acts)
A_k = batch_rtgs - V.detach() # ALG STEP 5
# One of the only tricks I use that isn't in the pseudocode. Normalizing advantages
# isn't theoretically necessary, but in practice it decreases the variance of
# our advantages and makes convergence much more stable and faster. I added this because
# solving some environments was too unstable without it.
A_k = (A_k - A_k.mean()) / (A_k.std() + 1e-10)
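# Worked example (illustrative): if A_k were tensor([1., 3.]) the mean is 2 and
# the (unbiased) std is ~1.414, so the normalized advantages become roughly
# [-0.71, 0.71]; only the scale changes, the relative ordering is preserved.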
# This is the loop where we update our network for some n epochs
for _ in range(self.n_updates_per_iteration): # ALG STEP 6 & 7
# Calculate V_phi and pi_theta(a_t | s_t)
V, curr_log_probs = self.evaluate(batch_obs, batch_acts)
# Calculate the ratio pi_theta(a_t | s_t) / pi_theta_k(a_t | s_t)
# NOTE: we just subtract the logs, which is the same as
# dividing the values and then canceling the log with e^log.
# For why we use log probabilities instead of actual probabilities,
# here's a great explanation:
# https://cs.stackexchange.com/questions/70518/why-do-we-use-the-log-in-gradient-based-reinforcement-algorithms
# TL;DR makes gradient ascent easier behind the scenes.
ratios = torch.exp(curr_log_probs - batch_log_probs)
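# Quick numeric check (illustrative): if curr_log_probs = -1.0 and
# batch_log_probs = -1.2 for some action, the ratio is exp(0.2) ~= 1.22, i.e.
# the updated policy is about 22% more likely to choose that action than the
# policy that collected the batch.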
# Calculate surrogate losses.
#print(f'A_k======================={A_k}')
surr1 = ratios * A_k
#print(f'surr1======================={surr1}')
surr2 = torch.clamp(ratios, 1 - self.clip, 1 + self.clip) * A_k
#print(f'surr2======================={surr2}')
# Calculate actor and critic losses.
# NOTE: we take the negative min of the surrogate losses because we're trying to maximize
# the performance function, but Adam minimizes the loss. So minimizing the negative
# performance function maximizes it.
actor_loss = (-torch.min(surr1, surr2)).mean()
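# Illustration of the clipping (added comment): with clip = 0.2 and a positive
# advantage A, surr2 caps the contribution at 1.2 * A, so min(surr1, surr2)
# stops rewarding ratio increases beyond 1.2; for a negative advantage the 0.8
# floor plays the symmetric role. The negative mean turns this maximization
# objective into a loss that Adam can minimize.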
#print(f'actor_loss======================={actor_loss}')
critic_loss = nn.MSELoss()(V, batch_rtgs)
# Calculate gradients and perform backward propagation for actor network
self.actor_optim.zero_grad()
actor_loss.backward(retain_graph=True)
self.actor_optim.step()
# Calculate gradients and perform backward propagation for critic network
self.critic_optim.zero_grad()
critic_loss.backward()
self.critic_optim.step()
# Log actor loss
self.logger['actor_losses'].append(actor_loss.detach())
self.logger['actor_network'] = self.actor
# Print a summary of our training so far
self._log_summary()
# Save our model if it's time
if i_so_far % self.save_freq == 0:
if subpolicy:
torch.save(self.actor.state_dict(), './ppo_actor_subpolicy'+env_name+'.pth')
torch.save(self.critic.state_dict(), './ppo_critic_subpolicy'+env_name+'.pth')
else:
torch.save(self.actor.state_dict(), './ppo_actor'+env_name+'.pth')
torch.save(self.critic.state_dict(), './ppo_critic'+env_name+'.pth')
def rollout(self,subpolicy,failure_observations):
"""
This is where we collect the batch of data
from simulation. Since this is an on-policy algorithm, we'll need to collect a fresh batch
of data each time we iterate the actor/critic networks.
Parameters:
None
Return:
batch_obs - the observations collected this batch. Shape: (number of timesteps, dimension of observation)
batch_acts - the actions collected this batch. Shape: (number of timesteps, dimension of action)
batch_log_probs - the log probabilities of each action taken this batch. Shape: (number of timesteps)
batch_rtgs - the Rewards-To-Go of each timestep in this batch. Shape: (number of timesteps)
batch_lens - the lengths of each episode this batch. Shape: (number of episodes)
"""
# Batch data. For more details, check function header.
batch_obs = []
batch_acts = []
batch_log_probs = []
batch_rews = []
batch_rtgs = []
batch_lens = []
batch_infractions = []
# Episodic data. Keeps track of rewards per episode, will get cleared
# upon each new episode
ep_rews = []
t = 0 # Keeps track of how many timesteps we've run so far this batch
# Keep simulating until we've run more than or equal to specified timesteps per batch
while t < self.timesteps_per_batch:
act_list = []
ep_rews = [] # rewards collected per episode
# Reset the environment. Note that obs is short for observation.
obs = self.env.reset()
#print(f'obs reset ============= {obs}')
done = False
count_infractions = 0
count_infractions_acc = 0
count_infractions_steer = 0
# Run an episode for a maximum of max_timesteps_per_episode timesteps
for ep_t in range(self.max_timesteps_per_episode):
a_predicted_clf = clf_control(self.env.v_ego)
delta, target_id, crosstrack_error = self.env.car.tracker.stanley_control(self.env.x_ego, self.env.y_ego, self.env.yaw_ego, self.env.v_ego, self.env.delta_ego)
# If render is specified, render the environment
if self.render:
self.env.render()
t += 1 # Increment timesteps ran this batch so far
# Track observations in this batch
batch_obs.append(obs)
# Calculate action and make a step in the env.
# Note that rew is short for reward.
if self.discrete:
action, log_prob = self.get_action_discrete(obs)
else:
action, log_prob = self.get_action(obs) #self.get_action_discrete(obs)
#print(f'action chosen =============== {action}')
if(abs(round(float(action[0]),1))<abs(round(float(a_predicted_clf),1))):
count_infractions_acc = count_infractions_acc+1
if(abs(round(float(action[1]),1)) < abs(round(float(delta),1))-0.2):
#print(f'After rounding =============== {round(float(action_net[1]),1)} ====== {round(float(action[1]),1)}')
count_infractions_steer = count_infractions_steer+1
obs, rew, done, info = self.env.step(action)
count_infractions = count_infractions_acc+count_infractions_steer
# Track recent reward, action, and action log probability
ep_rews.append(rew)
batch_acts.append(action)
batch_log_probs.append(log_prob)
act_list.append(info)
# If the environment tells us the episode is terminated, break
if done:
break
# Track episodic lengths and rewards
#self.env.render(act_list)
batch_lens.append(ep_t + 1)
batch_rews.append(ep_rews)
batch_infractions.append(count_infractions)
# Reshape data as tensors in the shape specified in function description, before returning
batch_obs = torch.tensor(batch_obs, dtype=torch.float)
#print(f'batch_acts =============== {batch_acts}')
#For discrete action spaces
if self.discrete:
batch_acts = torch.tensor(batch_acts, dtype=torch.long).view(-1,)
else:
batch_acts = torch.tensor(batch_acts, dtype=torch.float) #torch.tensor(batch_acts, dtype=torch.long).view(-1,)
#print(f'batch_acts =============== {batch_acts}')
batch_log_probs = torch.tensor(batch_log_probs, dtype=torch.float)
batch_rtgs = self.compute_rtgs(batch_rews) # ALG STEP 4
# Log the episodic returns and episodic lengths in this batch.
self.logger['batch_rews'] = batch_rews
self.logger['batch_lens'] = batch_lens
self.logger['batch_infractions'] = batch_infractions
return batch_obs, batch_acts, batch_log_probs, batch_rtgs, batch_lens
def compute_rtgs(self, batch_rews):
"""
Compute the Reward-To-Go of each timestep in a batch given the rewards.
Parameters:
batch_rews - the rewards in a batch, Shape: (number of episodes, number of timesteps per episode)
Return:
batch_rtgs - the rewards to go, Shape: (number of timesteps in batch)
"""
# The rewards-to-go (rtg) per episode per batch to return.
# The shape will be (num timesteps per episode)
batch_rtgs = []
# Iterate through each episode
for ep_rews in reversed(batch_rews):
discounted_reward = 0 # The discounted reward so far
# Iterate through all rewards in the episode. We go backwards for smoother calculation of each
# discounted return (think about why it would be harder starting from the beginning)
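# Worked example (illustrative): with gamma = 0.95 and ep_rews = [1, 2, 3] the
# backwards pass produces 3, then 2 + 0.95*3 = 4.85, then 1 + 0.95*4.85 = 5.6075,
# and insert(0, ...) restores chronological order: [5.6075, 4.85, 3].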
for rew in reversed(ep_rews):
discounted_reward = rew + discounted_reward * self.gamma
batch_rtgs.insert(0, discounted_reward)
# Convert the rewards-to-go into a tensor
batch_rtgs = torch.tensor(batch_rtgs, dtype=torch.float)
return batch_rtgs
# Probability sampling for discrete actions
def get_action_discrete(self, obs):
#print(f'obs ================== {obs}')
mean = self.actor(obs)
#print(f'mean ================== {mean}')
dist = Categorical(mean)
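# Note (assumption, not stated in this file): Categorical(mean) interprets `mean`
# as a probability vector, so FeedForwardActorNN is expected to end in a softmax
# when self.discrete is True; e.g. mean = tensor([0.7, 0.2, 0.1]) samples
# action 0 with probability 0.7.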
#print(f'dist ================== {dist}')
action = dist.sample()
log_prob = dist.log_prob(action)
#print(f'action ====== {action} ========= {log_prob}')
return action.detach().numpy().item(), log_prob.detach().item()
def get_action(self, obs):
"""
Queries an action from the actor network, should be called from rollout.
Parameters:
obs - the observation at the current timestep
Return:
action - the action to take, as a numpy array
log_prob - the log probability of the selected action in the distribution
"""
# Query the actor network for a mean action
mean = self.actor(obs)
# Create a distribution with the mean action and std from the covariance matrix above.
# For more information on how this distribution works, check out Andrew Ng's lecture on it:
# https://www.youtube.com/watch?v=JjB58InuTqM
dist = MultivariateNormal(mean, self.cov_mat)
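# Illustration (added comment): with the diagonal covariance built in __init__,
# dist.sample() returns the mean action plus independent Gaussian noise with
# std ~0.22 per dimension, which is what provides exploration during rollout.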
# Sample an action from the distribution
action = dist.sample()
# Calculate the log probability for that action
log_prob = dist.log_prob(action)
# Return the sampled action and the log probability of that action in our distribution
return action.detach().numpy(), log_prob.detach()
def evaluate(self, batch_obs, batch_acts):
"""
Estimate the values of each observation, and the log probs of
each action in the most recent batch with the most recent
iteration of the actor network. Should be called from learn.
Parameters:
batch_obs - the observations from the most recently collected batch as a tensor.
Shape: (number of timesteps in batch, dimension of observation)
batch_acts - the actions from the most recently collected batch as a tensor.
Shape: (number of timesteps in batch, dimension of action)
Return:
V - the predicted values of batch_obs
log_probs - the log probabilities of the actions taken in batch_acts given batch_obs
"""
# Query critic network for a value V for each batch_obs. Shape of V should be same as batch_rtgs
V = self.critic(batch_obs).squeeze()
# Calculate the log probabilities of batch actions using most recent actor network.
# This segment of code is similar to that in get_action()
mean = self.actor(batch_obs)
if self.discrete:
dist = Categorical(mean)
else:
dist = MultivariateNormal(mean, self.cov_mat)
#For discrete actions
#dist = Categorical(mean)
log_probs = dist.log_prob(batch_acts)
# Return the value vector V of each observation in the batch
# and log probabilities log_probs of each action in the batch
return V, log_probs
def _init_hyperparameters(self, hyperparameters):
"""
Initialize default and custom values for hyperparameters
Parameters:
hyperparameters - the extra arguments included when creating the PPO model, should only include
hyperparameters defined below with custom values.
Return:
None
"""
# Initialize default values for hyperparameters
# Algorithm hyperparameters
self.timesteps_per_batch = 4800 # Number of timesteps to run per batch
self.max_timesteps_per_episode = 1600 # Max number of timesteps per episode
self.n_updates_per_iteration = 5 # Number of times to update actor/critic per iteration
self.lr = 0.005 # Learning rate of actor optimizer
self.gamma = 0.95 # Discount factor to be applied when calculating Rewards-To-Go
self.clip = 0.2 # Recommended 0.2, helps define the threshold to clip the ratio during SGA
# Miscellaneous parameters
self.render = False # If we should render during rollout
self.save_freq = 10 # How often we save in number of iterations
self.seed = None # Sets the seed of our program, used for reproducibility of results
self.discrete = False # Sets the type of environment to discrete or continuous
self.training_step = 200 # Sets the number of training iterations
# Change any default values to custom values for specified hyperparameters
for param, val in hyperparameters.items():
exec('self.' + param + ' = ' + str(val))
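# Example of what the loop above does (illustrative): passing lr=0.003 executes
# the statement "self.lr = 0.003". This only round-trips values whose str() is a
# valid Python literal (numbers, booleans); a string-valued hyperparameter would
# need explicit quoting.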
# Sets the seed if specified
if self.seed != None:
# Check if our seed is valid first
assert(type(self.seed) == int)
# Set the seed
torch.manual_seed(self.seed)
print(f"Successfully set seed to {self.seed}")
def _log_summary(self):
"""
Print to stdout what we've logged so far in the most recent batch.
Parameters:
None
Return:
None
"""
# Calculate logging values. I use a few python shortcuts to calculate each value
# without explaining since it's not too important to PPO; feel free to look it over,
# and if you have any questions you can email me (look at bottom of README)
t_so_far = self.logger['t_so_far']
i_so_far = self.logger['i_so_far']
avg_ep_lens = np.mean(self.logger['batch_lens'])
avg_ep_rews = np.mean([np.sum(ep_rews) for ep_rews in self.logger['batch_rews']])
avg_actor_loss = np.mean([losses.float().mean() for losses in self.logger['actor_losses']])
avg_ep_infractions = np.mean([np.sum(ep_inf) for ep_inf in self.logger['batch_infractions']])
actor_model = self.logger['actor_network']
# Round decimal places for more aesthetic logging messages
avg_ep_lens = str(round(avg_ep_lens, 2))
avg_ep_rews = str(round(avg_ep_rews, 2))
avg_ep_infractions = str(round(avg_ep_infractions, 2))
avg_actor_loss = str(round(avg_actor_loss, 5))
writer.add_scalar("Average Episodic Return", int(float(avg_ep_rews)), t_so_far)
writer.add_scalar("Average actor Loss", int(float(avg_actor_loss)), t_so_far)
writer.add_scalar("Average Infractions", int(float(avg_ep_infractions)), t_so_far)
# Tracking the weight of the network
for name, param in actor_model.named_parameters():
if 'weight' in name:
writer.add_histogram(name, param.detach().numpy(), t_so_far)
# Print logging statements
print(flush=True)
print(f"-------------------- Iteration #{i_so_far} --------------------", flush=True)
print(f"Average Episodic Length: {avg_ep_lens}", flush=True)
print(f"Average Episodic Return: {avg_ep_rews}", flush=True)
print(f"Average Episodic Infractions : {avg_ep_infractions}", flush=True)
print(f"Average Loss: {avg_actor_loss}", flush=True)
print(f"Timesteps So Far: {t_so_far}", flush=True)
print(f"------------------------------------------------------", flush=True)
print(flush=True)
# Reset batch-specific logging data
self.logger['batch_lens'] = []
self.logger['batch_rews'] = []
self.logger['actor_losses'] = []
# MASKED: test function (lines 491-525)
|
def test(env, actor_model, is_discrete):
"""
Tests the model.
Parameters:
env - the environment to test the policy on
actor_model - the actor model to load in
is_discrete - whether the environment uses a discrete action space
Return:
None
"""
print(f"Testing {actor_model}", flush=True)
# If the actor model is not specified, then exit
if actor_model == '':
print(f"Didn't specify model file. Exiting.", flush=True)
sys.exit(0)
# Extract out dimensions of observation and action spaces
obs_dim = env.observation_space.shape[0]
if is_discrete:
act_dim = env.action_space.n
else:
act_dim = env.action_space.shape[0] #env.action_space.n #env.action_space.shape[0]
# Build our policy the same way we build our actor model in PPO
policy = FeedForwardActorNN(obs_dim, act_dim,is_discrete)
# Load in the actor model saved by the PPO algorithm
policy.load_state_dict(torch.load(actor_model))
# Evaluate our policy with a separate module, eval_policy, to demonstrate
# that once we are done training the model/policy with ppo.py, we no longer need
# ppo.py since it only contains the training algorithm. The model/policy itself exists
# independently as a binary file that can be loaded in with torch.
eval_policy(policy=policy, env=env, render=True, is_discrete=is_discrete)
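A hedged call sketch for the function above; the environment id and checkpoint path are placeholders, not taken from the source (the module name ppo comes from the comments in this file).
import gym
from ppo import test
env = gym.make("Pendulum-v1")  # placeholder continuous-action environment
test(env, actor_model="ppo_actor.pth", is_discrete=False)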
| 491 | 525 |
"""
The file contains the PPO class to train with.
NOTE: All "ALG STEP"s are following the numbers from the original PPO pseudocode.
It can be found here: https://spinningup.openai.com/en/latest/_images/math/e62a8971472597f4b014c2da064f636ffe365ba3.svg
"""
import gym
import numpy as np
import torch
import torch.nn as nn
from torch.optim import Adam
#For continuous actions
from torch.distributions import MultivariateNormal
#For discrete action_space
from torch.distributions import Categorical
from network import FeedForwardActorNN, FeedForwardCriticNN
import sys
from cbf_clf_helper import clf_control, cbf_control
#Integrating tensorboard
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter()
class PPO:
"""
This is the PPO class we will use as our model in main.py
"""
def __init__(self, env, **hyperparameters):
"""
Initializes the PPO model, including hyperparameters.
Parameters:
policy_class - the policy class to use for our actor/critic networks.
env - the environment to train on.
hyperparameters - all extra arguments passed into PPO that should be hyperparameters.
Returns:
None
"""
# Make sure the environment is compatible with our code
assert(type(env.observation_space) == gym.spaces.Box)
# The matching action-space assert is left commented out so discrete action spaces can also be used:
# assert(type(env.action_space) == gym.spaces.Box)
# Initialize hyperparameters for training with PPO
self._init_hyperparameters(hyperparameters)
# Extract environment information
self.env = env
self.obs_dim = env.observation_space.shape[0]
if self.discrete:
self.act_dim = env.action_space.n
else:
self.act_dim = env.action_space.shape[0] #env.action_space.n #env.action_space.shape[0]
# Initialize actor and critic networks
self.actor = FeedForwardActorNN(self.obs_dim, self.act_dim,self.discrete)
actor_model = 'ppo_actorKinematicBicycleGymLane.pth'
policy = FeedForwardActorNN(5, 2,False)
policy.load_state_dict(torch.load(actor_model))
actor_model = policy
#print(f'model =========== {self.actor}') # ALG STEP 1
self.critic = FeedForwardCriticNN(self.obs_dim, 1)
#print(f'critic =========== {self.critic}')
# Initialize optimizers for actor and critic
self.actor_optim = Adam(self.actor.parameters(), lr=self.lr)
self.critic_optim = Adam(self.critic.parameters(), lr=self.lr)
# Initialize the covariance matrix used to query the actor for actions
self.cov_var = torch.full(size=(self.act_dim,), fill_value=0.05)
self.cov_mat = torch.diag(self.cov_var)
self.obs_count = 0
self.index_count = 0
# This logger will help us with printing out summaries of each iteration
self.logger = {
't_so_far': 0, # timesteps so far
'i_so_far': 0, # iterations so far
'batch_lens': [], # episodic lengths in batch
'batch_rews': [], # episodic returns in batch
'batch_infractions': [], # episodic infraction counts in batch
'actor_losses': [], # losses of actor network in current iteration
'actor_network' : 0, # Actor network
}
def learn(self, env_name,failure_observations,subpolicy):
"""
Train the actor and critic networks. Here is where the main PPO algorithm resides.
Parameters:
total_timesteps - the total number of timesteps to train for
Return:
None
"""
print(f"Learning... Running {self.max_timesteps_per_episode} timesteps per episode, ", end='')
print(f"{self.timesteps_per_batch} timesteps per batch for a total of {self.training_step} iterations")
t_so_far = 0 # Timesteps simulated so far
i_so_far = 0 # Iterations ran so far
while i_so_far < self.training_step: # ALG STEP 2
# Autobots, roll out (just kidding, we're collecting our batch simulations here)
batch_obs, batch_acts, batch_log_probs, batch_rtgs, batch_lens = self.rollout(subpolicy,failure_observations) # ALG STEP 3
# Calculate how many timesteps we collected this batch
t_so_far += np.sum(batch_lens)
# Increment the number of iterations
i_so_far += 1
# Logging timesteps so far and iterations so far
self.logger['t_so_far'] = t_so_far
self.logger['i_so_far'] = i_so_far
# Calculate advantage at k-th iteration
V, _ = self.evaluate(batch_obs, batch_acts)
A_k = batch_rtgs - V.detach() # ALG STEP 5
# One of the only tricks I use that isn't in the pseudocode. Normalizing advantages
# isn't theoretically necessary, but in practice it decreases the variance of
# our advantages and makes convergence much more stable and faster. I added this because
# solving some environments was too unstable without it.
A_k = (A_k - A_k.mean()) / (A_k.std() + 1e-10)
# This is the loop where we update our network for some n epochs
for _ in range(self.n_updates_per_iteration): # ALG STEP 6 & 7
# Calculate V_phi and pi_theta(a_t | s_t)
V, curr_log_probs = self.evaluate(batch_obs, batch_acts)
# Calculate the ratio pi_theta(a_t | s_t) / pi_theta_k(a_t | s_t)
# NOTE: we just subtract the logs, which is the same as
# dividing the values and then canceling the log with e^log.
# For why we use log probabilities instead of actual probabilities,
# here's a great explanation:
# https://cs.stackexchange.com/questions/70518/why-do-we-use-the-log-in-gradient-based-reinforcement-algorithms
# TL;DR makes gradient ascent easier behind the scenes.
ratios = torch.exp(curr_log_probs - batch_log_probs)
# Calculate surrogate losses.
#print(f'A_k======================={A_k}')
surr1 = ratios * A_k
#print(f'surr1======================={surr1}')
surr2 = torch.clamp(ratios, 1 - self.clip, 1 + self.clip) * A_k
#print(f'surr2======================={surr2}')
# Calculate actor and critic losses.
# NOTE: we take the negative min of the surrogate losses because we're trying to maximize
# the performance function, but Adam minimizes the loss. So minimizing the negative
# performance function maximizes it.
actor_loss = (-torch.min(surr1, surr2)).mean()
#print(f'actor_loss======================={actor_loss}')
critic_loss = nn.MSELoss()(V, batch_rtgs)
# Calculate gradients and perform backward propagation for actor network
self.actor_optim.zero_grad()
actor_loss.backward(retain_graph=True)
self.actor_optim.step()
# Calculate gradients and perform backward propagation for critic network
self.critic_optim.zero_grad()
critic_loss.backward()
self.critic_optim.step()
# Log actor loss
self.logger['actor_losses'].append(actor_loss.detach())
self.logger['actor_network'] = self.actor
# Print a summary of our training so far
self._log_summary()
# Save our model if it's time
if i_so_far % self.save_freq == 0:
if subpolicy:
torch.save(self.actor.state_dict(), './ppo_actor_subpolicy'+env_name+'.pth')
torch.save(self.critic.state_dict(), './ppo_critic_subpolicy'+env_name+'.pth')
else:
torch.save(self.actor.state_dict(), './ppo_actor'+env_name+'.pth')
torch.save(self.critic.state_dict(), './ppo_critic'+env_name+'.pth')
def rollout(self,subpolicy,failure_observations):
"""
This is where we collect the batch of data
from simulation. Since this is an on-policy algorithm, we'll need to collect a fresh batch
of data each time we iterate the actor/critic networks.
Parameters:
None
Return:
batch_obs - the observations collected this batch. Shape: (number of timesteps, dimension of observation)
batch_acts - the actions collected this batch. Shape: (number of timesteps, dimension of action)
batch_log_probs - the log probabilities of each action taken this batch. Shape: (number of timesteps)
batch_rtgs - the Rewards-To-Go of each timestep in this batch. Shape: (number of timesteps)
batch_lens - the lengths of each episode this batch. Shape: (number of episodes)
"""
# Batch data. For more details, check function header.
batch_obs = []
batch_acts = []
batch_log_probs = []
batch_rews = []
batch_rtgs = []
batch_lens = []
batch_infractions = []
# Episodic data. Keeps track of rewards per episode, will get cleared
# upon each new episode
ep_rews = []
t = 0 # Keeps track of how many timesteps we've run so far this batch
# Keep simulating until we've run more than or equal to specified timesteps per batch
while t < self.timesteps_per_batch:
act_list = []
ep_rews = [] # rewards collected per episode
# Reset the environment. sNote that obs is short for observation.
obs = self.env.reset()
#print(f'obs reset ============= {obs}')
done = False
count_infractions = 0
count_infractions_acc = 0
count_infractions_steer = 0
# Run an episode for a maximum of max_timesteps_per_episode timesteps
for ep_t in range(self.max_timesteps_per_episode):
a_predicted_clf = clf_control(self.env.v_ego)
delta, target_id, crosstrack_error = self.env.car.tracker.stanley_control(self.env.x_ego, self.env.y_ego, self.env.yaw_ego, self.env.v_ego, self.env.delta_ego)
# If render is specified, render the environment
if self.render:
self.env.render()
t += 1 # Increment timesteps ran this batch so far
# Track observations in this batch
batch_obs.append(obs)
# Calculate action and make a step in the env.
# Note that rew is short for reward.
if self.discrete:
action, log_prob = self.get_action_discrete(obs)
else:
action, log_prob = self.get_action(obs) #self.get_action_discrete(obs)
#print(f'action chosen =============== {action}')
if(abs(round(float(action[0]),1))<abs(round(float(a_predicted_clf),1))):
count_infractions_acc = count_infractions_acc+1
if(abs(round(float(action[1]),1)) < abs(round(float(delta),1))-0.2):
#print(f'After rounding =============== {round(float(action_net[1]),1)} ====== {round(float(action[1]),1)}')
count_infractions_steer = count_infractions_steer+1
obs, rew, done, info = self.env.step(action)
count_infractions = count_infractions_acc+count_infractions_steer
# Track recent reward, action, and action log probability
ep_rews.append(rew)
batch_acts.append(action)
batch_log_probs.append(log_prob)
act_list.append(info)
# If the environment tells us the episode is terminated, break
if done:
break
# Track episodic lengths and rewards
#self.env.render(act_list)
batch_lens.append(ep_t + 1)
batch_rews.append(ep_rews)
batch_infractions.append(count_infractions)
# Reshape data as tensors in the shape specified in function description, before returning
batch_obs = torch.tensor(batch_obs, dtype=torch.float)
#print(f'batch_acts =============== {batch_acts}')
#For discrete action spaces
if self.discrete:
batch_acts = torch.tensor(batch_acts, dtype=torch.long).view(-1,)
else:
batch_acts = torch.tensor(batch_acts, dtype=torch.float) #torch.tensor(batch_acts, dtype=torch.long).view(-1,)
#print(f'batch_acts =============== {batch_acts}')
batch_log_probs = torch.tensor(batch_log_probs, dtype=torch.float)
batch_rtgs = self.compute_rtgs(batch_rews) # ALG STEP 4
# Log the episodic returns and episodic lengths in this batch.
self.logger['batch_rews'] = batch_rews
self.logger['batch_lens'] = batch_lens
self.logger['batch_infractions'] = batch_infractions
return batch_obs, batch_acts, batch_log_probs, batch_rtgs, batch_lens
def compute_rtgs(self, batch_rews):
"""
Compute the Reward-To-Go of each timestep in a batch given the rewards.
Parameters:
batch_rews - the rewards in a batch, Shape: (number of episodes, number of timesteps per episode)
Return:
batch_rtgs - the rewards to go, Shape: (number of timesteps in batch)
"""
# The rewards-to-go (rtg) per episode per batch to return.
# The shape will be (num timesteps per episode)
batch_rtgs = []
# Iterate through each episode
for ep_rews in reversed(batch_rews):
discounted_reward = 0 # The discounted reward so far
# Iterate through all rewards in the episode. We go backwards for smoother calculation of each
# discounted return (think about why it would be harder starting from the beginning)
for rew in reversed(ep_rews):
discounted_reward = rew + discounted_reward * self.gamma
batch_rtgs.insert(0, discounted_reward)
# Convert the rewards-to-go into a tensor
batch_rtgs = torch.tensor(batch_rtgs, dtype=torch.float)
return batch_rtgs
# Probability sampling for discrete actions
def get_action_discrete(self, obs):
#print(f'obs ================== {obs}')
mean = self.actor(obs)
#print(f'mean ================== {mean}')
dist = Categorical(mean)
#print(f'dist ================== {dist}')
action = dist.sample()
log_prob = dist.log_prob(action)
#print(f'action ====== {action} ========= {log_prob}')
return action.detach().numpy().item(), log_prob.detach().item()
def get_action(self, obs):
"""
Queries an action from the actor network, should be called from rollout.
Parameters:
obs - the observation at the current timestep
Return:
action - the action to take, as a numpy array
log_prob - the log probability of the selected action in the distribution
"""
# Query the actor network for a mean action
mean = self.actor(obs)
# Create a distribution with the mean action and std from the covariance matrix above.
# For more information on how this distribution works, check out Andrew Ng's lecture on it:
# https://www.youtube.com/watch?v=JjB58InuTqM
dist = MultivariateNormal(mean, self.cov_mat)
# Sample an action from the distribution
action = dist.sample()
# Calculate the log probability for that action
log_prob = dist.log_prob(action)
# Return the sampled action and the log probability of that action in our distribution
return action.detach().numpy(), log_prob.detach()
def evaluate(self, batch_obs, batch_acts):
"""
Estimate the values of each observation, and the log probs of
each action in the most recent batch with the most recent
iteration of the actor network. Should be called from learn.
Parameters:
batch_obs - the observations from the most recently collected batch as a tensor.
Shape: (number of timesteps in batch, dimension of observation)
batch_acts - the actions from the most recently collected batch as a tensor.
Shape: (number of timesteps in batch, dimension of action)
Return:
V - the predicted values of batch_obs
log_probs - the log probabilities of the actions taken in batch_acts given batch_obs
"""
# Query critic network for a value V for each batch_obs. Shape of V should be same as batch_rtgs
V = self.critic(batch_obs).squeeze()
# Calculate the log probabilities of batch actions using most recent actor network.
# This segment of code is similar to that in get_action()
mean = self.actor(batch_obs)
if self.discrete:
dist = Categorical(mean)
else:
dist = MultivariateNormal(mean, self.cov_mat)
#For discrete actions
#dist = Categorical(mean)
log_probs = dist.log_prob(batch_acts)
# Return the value vector V of each observation in the batch
# and log probabilities log_probs of each action in the batch
return V, log_probs
def _init_hyperparameters(self, hyperparameters):
"""
Initialize default and custom values for hyperparameters
Parameters:
hyperparameters - the extra arguments included when creating the PPO model, should only include
hyperparameters defined below with custom values.
Return:
None
"""
# Initialize default values for hyperparameters
# Algorithm hyperparameters
self.timesteps_per_batch = 4800 # Number of timesteps to run per batch
self.max_timesteps_per_episode = 1600 # Max number of timesteps per episode
self.n_updates_per_iteration = 5 # Number of times to update actor/critic per iteration
self.lr = 0.005 # Learning rate of actor optimizer
self.gamma = 0.95 # Discount factor to be applied when calculating Rewards-To-Go
self.clip = 0.2 # Recommended 0.2, helps define the threshold to clip the ratio during SGA
# Miscellaneous parameters
self.render = False # If we should render during rollout
self.save_freq = 10 # How often we save in number of iterations
self.seed = None # Sets the seed of our program, used for reproducibility of results
self.discrete = False # Sets the type of environment to discrete or continuous
self.training_step = 200 # Sets the number of training iterations
# Change any default values to custom values for specified hyperparameters
for param, val in hyperparameters.items():
setattr(self, param, val)  # safer and more robust than exec('self.' + param + ' = ' + str(val))
# Sets the seed if specified
if self.seed != None:
# Check if our seed is valid first
assert(type(self.seed) == int)
# Set the seed
torch.manual_seed(self.seed)
print(f"Successfully set seed to {self.seed}")
def _log_summary(self):
"""
Print to stdout what we've logged so far in the most recent batch.
Parameters:
None
Return:
None
"""
# Calculate logging values. I use a few python shortcuts to calculate each value
# without explaining since it's not too important to PPO; feel free to look it over,
# and if you have any questions you can email me (look at bottom of README)
t_so_far = self.logger['t_so_far']
i_so_far = self.logger['i_so_far']
avg_ep_lens = np.mean(self.logger['batch_lens'])
avg_ep_rews = np.mean([np.sum(ep_rews) for ep_rews in self.logger['batch_rews']])
avg_actor_loss = np.mean([losses.float().mean() for losses in self.logger['actor_losses']])
avg_ep_infractions = np.mean([np.sum(ep_inf) for ep_inf in self.logger['batch_infractions']])
actor_model = self.logger['actor_network']
# Round decimal places for more aesthetic logging messages
avg_ep_lens = str(round(avg_ep_lens, 2))
avg_ep_rews = str(round(avg_ep_rews, 2))
avg_ep_infractions = str(round(avg_ep_infractions, 2))
avg_actor_loss = str(round(avg_actor_loss, 5))
writer.add_scalar("Average Episodic Return", int(float(avg_ep_rews)), t_so_far)
writer.add_scalar("Average actor Loss", int(float(avg_actor_loss)), t_so_far)
writer.add_scalar("Average Infractions", int(float(avg_ep_infractions)), t_so_far)
# Tracking the weight of the network
for name, param in actor_model.named_parameters():
if 'weight' in name:
writer.add_histogram(name, param.detach().numpy(), t_so_far)
# Print logging statements
print(flush=True)
print(f"-------------------- Iteration #{i_so_far} --------------------", flush=True)
print(f"Average Episodic Length: {avg_ep_lens}", flush=True)
print(f"Average Episodic Return: {avg_ep_rews}", flush=True)
print(f"Average Episodic Infractions : {avg_ep_infractions}", flush=True)
print(f"Average Loss: {avg_actor_loss}", flush=True)
print(f"Timesteps So Far: {t_so_far}", flush=True)
print(f"------------------------------------------------------", flush=True)
print(flush=True)
# Reset batch-specific logging data
self.logger['batch_lens'] = []
self.logger['batch_rews'] = []
self.logger['actor_losses'] = []
def test(env, actor_model, is_discrete):
"""
Tests the model.
Parameters:
env - the environment to test the policy on
actor_model - the actor model to load in
is_discrete - whether the environment's action space is discrete
Return:
None
"""
print(f"Testing {actor_model}", flush=True)
# If the actor model is not specified, then exit
if actor_model == '':
print(f"Didn't specify model file. Exiting.", flush=True)
sys.exit(0)
# Extract out dimensions of observation and action spaces
obs_dim = env.observation_space.shape[0]
if is_discrete:
act_dim = env.action_space.n
else:
act_dim = env.action_space.shape[0] #env.action_space.n #env.action_space.shape[0]
# Build our policy the same way we build our actor model in PPO
policy = FeedForwardActorNN(obs_dim, act_dim,is_discrete)
# Load in the actor model saved by the PPO algorithm
policy.load_state_dict(torch.load(actor_model))
# Evaluate our policy with a separate module, eval_policy, to demonstrate
# that once we are done training the model/policy with ppo.py, we no longer need
# ppo.py since it only contains the training algorithm. The model/policy itself exists
# independently as a binary file that can be loaded in with torch.
eval_policy(policy=policy, env=env, render=True, is_discrete=is_discrete)
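# Note: eval_policy is assumed to come from a separate evaluation module (it is not
# imported in this file as shown). A typical call of test() would look like
#   test(env=gym.make("<your-env-id>"), actor_model='./ppo_actor<env_name>.pth', is_discrete=False)
# where the model path is whatever learn() saved earlier.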
|
__init__
|
Initializes the PPO model, including hyperparameters.
Parameters:
env - the environment to train on.
hyperparameters - all extra arguments passed into PPO that should be hyperparameters.
Returns:
None
|
"""
The file contains the PPO class to train with.
NOTE: All "ALG STEP"s are following the numbers from the original PPO pseudocode.
It can be found here: https://spinningup.openai.com/en/latest/_images/math/e62a8971472597f4b014c2da064f636ffe365ba3.svg
"""
import gym
import numpy as np
import torch
import torch.nn as nn
from torch.optim import Adam
#For continuous actions
from torch.distributions import MultivariateNormal
#For discrete action_space
from torch.distributions import Categorical
from network import FeedForwardActorNN, FeedForwardCriticNN
import sys
from cbf_clf_helper import clf_control, cbf_control
#Integrating tensorboard
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter()
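# SummaryWriter() with no arguments writes TensorBoard event files under ./runs/;
# inspect them with `tensorboard --logdir runs`.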
class PPO:
"""
This is the PPO class we will use as our model in main.py
"""
# MASKED: __init__ function (lines 30-86)
def learn(self, env_name,failure_observations,subpolicy):
"""
Train the actor and critic networks. Here is where the main PPO algorithm resides.
Parameters:
env_name - string appended to the checkpoint filenames when saving the actor/critic
failure_observations - failure-case data passed through to rollout
subpolicy - if True, checkpoints are saved under the subpolicy filenames
Return:
None
"""
print(f"Learning... Running {self.max_timesteps_per_episode} timesteps per episode, ", end='')
print(f"{self.timesteps_per_batch} timesteps per batch for a total of {self.training_step} iterations")
t_so_far = 0 # Timesteps simulated so far
i_so_far = 0 # Iterations run so far
while i_so_far < self.training_step: # ALG STEP 2
# Autobots, roll out (just kidding, we're collecting our batch simulations here)
batch_obs, batch_acts, batch_log_probs, batch_rtgs, batch_lens = self.rollout(subpolicy,failure_observations) # ALG STEP 3
# Calculate how many timesteps we collected this batch
t_so_far += np.sum(batch_lens)
# Increment the number of iterations
i_so_far += 1
# Logging timesteps so far and iterations so far
self.logger['t_so_far'] = t_so_far
self.logger['i_so_far'] = i_so_far
# Calculate advantage at k-th iteration
V, _ = self.evaluate(batch_obs, batch_acts)
A_k = batch_rtgs - V.detach() # ALG STEP 5
# One of the only tricks I use that isn't in the pseudocode. Normalizing advantages
# isn't theoretically necessary, but in practice it decreases the variance of
# our advantages and makes convergence much more stable and faster. I added this because
# solving some environments was too unstable without it.
A_k = (A_k - A_k.mean()) / (A_k.std() + 1e-10)
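# Standardizing A_k to zero mean and unit variance keeps the scale of the policy gradient
# roughly constant across batches; the 1e-10 term only guards against division by zero
# when all advantages in a batch happen to be identical.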
# This is the loop where we update our network for some n epochs
for _ in range(self.n_updates_per_iteration): # ALG STEP 6 & 7
# Calculate V_phi and pi_theta(a_t | s_t)
V, curr_log_probs = self.evaluate(batch_obs, batch_acts)
# Calculate the ratio pi_theta(a_t | s_t) / pi_theta_k(a_t | s_t)
# NOTE: we just subtract the logs, which is the same as
# dividing the values and then canceling the log with e^log.
# For why we use log probabilities instead of actual probabilities,
# here's a great explanation:
# https://cs.stackexchange.com/questions/70518/why-do-we-use-the-log-in-gradient-based-reinforcement-algorithms
# TL;DR makes gradient ascent easier behind the scenes.
ratios = torch.exp(curr_log_probs - batch_log_probs)
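# Illustration (made-up numbers): if pi_new(a|s) = 0.30 and pi_old(a|s) = 0.25, then
# exp(log(0.30) - log(0.25)) = 0.30 / 0.25 = 1.2, i.e. the updated policy makes that
# action 20% more likely than the policy that collected the batch.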
# Calculate surrogate losses.
#print(f'A_k======================={A_k}')
surr1 = ratios * A_k
#print(f'surr1======================={surr1}')
surr2 = torch.clamp(ratios, 1 - self.clip, 1 + self.clip) * A_k
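# With clip = 0.2 the ratio inside surr2 is limited to [0.8, 1.2]; continuing the example
# above, a ratio of 1.2 passes through unchanged, while a ratio of 1.5 would be clipped to
# 1.2, so for a positive advantage the objective gains nothing from pushing the policy
# further away from the one that collected the data.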
#print(f'surr2======================={surr2}')
# Calculate actor and critic losses.
# NOTE: we take the negative min of the surrogate losses because we're trying to maximize
# the performance function, but Adam minimizes the loss. So minimizing the negative
# performance function maximizes it.
actor_loss = (-torch.min(surr1, surr2)).mean()
#print(f'actor_loss======================={actor_loss}')
critic_loss = nn.MSELoss()(V, batch_rtgs)
# Calculate gradients and perform backward propagation for actor network
self.actor_optim.zero_grad()
actor_loss.backward(retain_graph=True)
self.actor_optim.step()
# Calculate gradients and perform backward propagation for critic network
self.critic_optim.zero_grad()
critic_loss.backward()
self.critic_optim.step()
# Log actor loss
self.logger['actor_losses'].append(actor_loss.detach())
self.logger['actor_network'] = self.actor
# Print a summary of our training so far
self._log_summary()
# Save our model if it's time
if i_so_far % self.save_freq == 0:
if subpolicy:
torch.save(self.actor.state_dict(), './ppo_actor_subpolicy'+env_name+'.pth')
torch.save(self.critic.state_dict(), './ppo_critic_subpolicy'+env_name+'.pth')
else:
torch.save(self.actor.state_dict(), './ppo_actor'+env_name+'.pth')
torch.save(self.critic.state_dict(), './ppo_critic'+env_name+'.pth')
def rollout(self,subpolicy,failure_observations):
"""
This is where we collect the batch of data
from simulation. Since this is an on-policy algorithm, we'll need to collect a fresh batch
of data each time we iterate the actor/critic networks.
Parameters:
subpolicy - flag passed down from learn
failure_observations - failure-case data passed down from learn (not used directly in this implementation)
Return:
batch_obs - the observations collected this batch. Shape: (number of timesteps, dimension of observation)
batch_acts - the actions collected this batch. Shape: (number of timesteps, dimension of action)
batch_log_probs - the log probabilities of each action taken this batch. Shape: (number of timesteps)
batch_rtgs - the Rewards-To-Go of each timestep in this batch. Shape: (number of timesteps)
batch_lens - the lengths of each episode this batch. Shape: (number of episodes)
"""
# Batch data. For more details, check function header.
batch_obs = []
batch_acts = []
batch_log_probs = []
batch_rews = []
batch_rtgs = []
batch_lens = []
batch_infractions = []
# Episodic data. Keeps track of rewards per episode, will get cleared
# upon each new episode
ep_rews = []
t = 0 # Keeps track of how many timesteps we've run so far this batch
# Keep simulating until we've run more than or equal to specified timesteps per batch
while t < self.timesteps_per_batch:
act_list = []
ep_rews = [] # rewards collected per episode
# Reset the environment. Note that obs is short for observation.
obs = self.env.reset()
#print(f'obs reset ============= {obs}')
done = False
count_infractions = 0
count_infractions_acc = 0
count_infractions_steer = 0
# Run an episode for a maximum of max_timesteps_per_episode timesteps
for ep_t in range(self.max_timesteps_per_episode):
a_predicted_clf = clf_control(self.env.v_ego)
delta, target_id, crosstrack_error = self.env.car.tracker.stanley_control(self.env.x_ego, self.env.y_ego, self.env.yaw_ego, self.env.v_ego, self.env.delta_ego)
# If render is specified, render the environment
if self.render:
self.env.render()
t += 1 # Increment timesteps ran this batch so far
# Track observations in this batch
batch_obs.append(obs)
# Calculate action and make a step in the env.
# Note that rew is short for reward.
if self.discrete:
action, log_prob = self.get_action_discrete(obs)
else:
action, log_prob = self.get_action(obs) #self.get_action_discrete(obs)
#print(f'action chosen =============== {action}')
if(abs(round(float(action[0]),1))<abs(round(float(a_predicted_clf),1))):
count_infractions_acc = count_infractions_acc+1
if(abs(round(float(action[1]),1)) < abs(round(float(delta),1))-0.2):
#print(f'After rounding =============== {round(float(action_net[1]),1)} ====== {round(float(action[1]),1)}')
count_infractions_steer = count_infractions_steer+1
obs, rew, done, info = self.env.step(action)
count_infractions = count_infractions_acc+count_infractions_steer
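# "Infractions" count timesteps where the sampled acceleration magnitude (action[0]) falls
# below the CLF reference, or the sampled steering magnitude (action[1]) falls short of the
# Stanley controller's delta by more than 0.2 (after rounding to one decimal). They are only
# logged for monitoring; note the indexing assumes the continuous (vector-valued) action case.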
# Track recent reward, action, and action log probability
ep_rews.append(rew)
batch_acts.append(action)
batch_log_probs.append(log_prob)
act_list.append(info)
# If the environment tells us the episode is terminated, break
if done:
break
# Track episodic lengths and rewards
#self.env.render(act_list)
batch_lens.append(ep_t + 1)
batch_rews.append(ep_rews)
batch_infractions.append(count_infractions)
# Reshape data as tensors in the shape specified in function description, before returning
batch_obs = torch.tensor(batch_obs, dtype=torch.float)
#print(f'batch_acts =============== {batch_acts}')
#For discrete state space
if self.discrete:
batch_acts = torch.tensor(batch_acts, dtype=torch.long).view(-1,)
else:
batch_acts = torch.tensor(batch_acts, dtype=torch.float) #torch.tensor(batch_acts, dtype=torch.long).view(-1,)
#print(f'batch_acts =============== {batch_acts}')
batch_log_probs = torch.tensor(batch_log_probs, dtype=torch.float)
batch_rtgs = self.compute_rtgs(batch_rews) # ALG STEP 4
# Log the episodic returns and episodic lengths in this batch.
self.logger['batch_rews'] = batch_rews
self.logger['batch_lens'] = batch_lens
self.logger['batch_infractions'] = batch_infractions
return batch_obs, batch_acts, batch_log_probs, batch_rtgs, batch_lens
def compute_rtgs(self, batch_rews):
"""
Compute the Reward-To-Go of each timestep in a batch given the rewards.
Parameters:
batch_rews - the rewards in a batch, Shape: (number of episodes, number of timesteps per episode)
Return:
batch_rtgs - the rewards to go, Shape: (number of timesteps in batch)
"""
# The rewards-to-go (rtg) per episode per batch to return.
# The shape will be (num timesteps per episode)
batch_rtgs = []
# Iterate through each episode
for ep_rews in reversed(batch_rews):
discounted_reward = 0 # The discounted reward so far
# Iterate through all rewards in the episode. We go backwards for smoother calculation of each
# discounted return (think about why it would be harder starting from the beginning)
for rew in reversed(ep_rews):
discounted_reward = rew + discounted_reward * self.gamma
batch_rtgs.insert(0, discounted_reward)
# Convert the rewards-to-go into a tensor
batch_rtgs = torch.tensor(batch_rtgs, dtype=torch.float)
return batch_rtgs
# Probability sampling for discrete actions
def get_action_discrete(self, obs):
#print(f'obs ================== {obs}')
mean = self.actor(obs)
#print(f'mean ================== {mean}')
dist = Categorical(mean)
#print(f'dist ================== {dist}')
action = dist.sample()
log_prob = dist.log_prob(action)
#print(f'action ====== {action} ========= {log_prob}')
return action.detach().numpy().item(), log_prob.detach().item()
def get_action(self, obs):
"""
Queries an action from the actor network, should be called from rollout.
Parameters:
obs - the observation at the current timestep
Return:
action - the action to take, as a numpy array
log_prob - the log probability of the selected action in the distribution
"""
# Query the actor network for a mean action
mean = self.actor(obs)
# Create a distribution with the mean action and std from the covariance matrix above.
# For more information on how this distribution works, check out Andrew Ng's lecture on it:
# https://www.youtube.com/watch?v=JjB58InuTqM
dist = MultivariateNormal(mean, self.cov_mat)
# Sample an action from the distribution
action = dist.sample()
# Calculate the log probability for that action
log_prob = dist.log_prob(action)
# Return the sampled action and the log probability of that action in our distribution
return action.detach().numpy(), log_prob.detach()
def evaluate(self, batch_obs, batch_acts):
"""
Estimate the values of each observation, and the log probs of
each action in the most recent batch with the most recent
iteration of the actor network. Should be called from learn.
Parameters:
batch_obs - the observations from the most recently collected batch as a tensor.
Shape: (number of timesteps in batch, dimension of observation)
batch_acts - the actions from the most recently collected batch as a tensor.
Shape: (number of timesteps in batch, dimension of action)
Return:
V - the predicted values of batch_obs
log_probs - the log probabilities of the actions taken in batch_acts given batch_obs
"""
# Query critic network for a value V for each batch_obs. Shape of V should be same as batch_rtgs
V = self.critic(batch_obs).squeeze()
# Calculate the log probabilities of batch actions using most recent actor network.
# This segment of code is similar to that in get_action()
mean = self.actor(batch_obs)
if self.discrete:
dist = Categorical(mean)
else:
dist = MultivariateNormal(mean, self.cov_mat)
#For discrete actions
#dist = Categorical(mean)
log_probs = dist.log_prob(batch_acts)
# Return the value vector V of each observation in the batch
# and log probabilities log_probs of each action in the batch
return V, log_probs
def _init_hyperparameters(self, hyperparameters):
"""
Initialize default and custom values for hyperparameters
Parameters:
hyperparameters - the extra arguments included when creating the PPO model, should only include
hyperparameters defined below with custom values.
Return:
None
"""
# Initialize default values for hyperparameters
# Algorithm hyperparameters
self.timesteps_per_batch = 4800 # Number of timesteps to run per batch
self.max_timesteps_per_episode = 1600 # Max number of timesteps per episode
self.n_updates_per_iteration = 5 # Number of times to update actor/critic per iteration
self.lr = 0.005 # Learning rate of actor optimizer
self.gamma = 0.95 # Discount factor to be applied when calculating Rewards-To-Go
self.clip = 0.2 # Recommended 0.2, helps define the threshold to clip the ratio during SGA
# Miscellaneous parameters
self.render = False # If we should render during rollout
self.save_freq = 10 # How often we save in number of iterations
self.seed = None # Sets the seed of our program, used for reproducibility of results
self.discrete = False # Sets the type of environment to discrete or continuous
self.training_step = 200 # Sets the number of training iterations
# Change any default values to custom values for specified hyperparameters
for param, val in hyperparameters.items():
setattr(self, param, val)  # safer and more robust than exec('self.' + param + ' = ' + str(val))
# Sets the seed if specified
if self.seed != None:
# Check if our seed is valid first
assert(type(self.seed) == int)
# Set the seed
torch.manual_seed(self.seed)
print(f"Successfully set seed to {self.seed}")
def _log_summary(self):
"""
Print to stdout what we've logged so far in the most recent batch.
Parameters:
None
Return:
None
"""
# Calculate logging values. I use a few python shortcuts to calculate each value
# without explaining since it's not too important to PPO; feel free to look it over,
# and if you have any questions you can email me (look at bottom of README)
t_so_far = self.logger['t_so_far']
i_so_far = self.logger['i_so_far']
avg_ep_lens = np.mean(self.logger['batch_lens'])
avg_ep_rews = np.mean([np.sum(ep_rews) for ep_rews in self.logger['batch_rews']])
avg_actor_loss = np.mean([losses.float().mean() for losses in self.logger['actor_losses']])
avg_ep_infractions = np.mean([np.sum(ep_inf) for ep_inf in self.logger['batch_infractions']])
actor_model = self.logger['actor_network']
# Round decimal places for more aesthetic logging messages
avg_ep_lens = str(round(avg_ep_lens, 2))
avg_ep_rews = str(round(avg_ep_rews, 2))
avg_ep_infractions = str(round(avg_ep_infractions, 2))
avg_actor_loss = str(round(avg_actor_loss, 5))
writer.add_scalar("Average Episodic Return", int(float(avg_ep_rews)), t_so_far)
writer.add_scalar("Average actor Loss", int(float(avg_actor_loss)), t_so_far)
writer.add_scalar("Average Infractions", int(float(avg_ep_infractions)), t_so_far)
# Tracking the weight of the network
for name, param in actor_model.named_parameters():
if 'weight' in name:
writer.add_histogram(name, param.detach().numpy(), t_so_far)
# Print logging statements
print(flush=True)
print(f"-------------------- Iteration #{i_so_far} --------------------", flush=True)
print(f"Average Episodic Length: {avg_ep_lens}", flush=True)
print(f"Average Episodic Return: {avg_ep_rews}", flush=True)
print(f"Average Episodic Infractions : {avg_ep_infractions}", flush=True)
print(f"Average Loss: {avg_actor_loss}", flush=True)
print(f"Timesteps So Far: {t_so_far}", flush=True)
print(f"------------------------------------------------------", flush=True)
print(flush=True)
# Reset batch-specific logging data
self.logger['batch_lens'] = []
self.logger['batch_rews'] = []
self.logger['actor_losses'] = []
def test(env, actor_model, is_discrete):
"""
Tests the model.
Parameters:
env - the environment to test the policy on
actor_model - the actor model to load in
is_discrete - whether the environment's action space is discrete
Return:
None
"""
print(f"Testing {actor_model}", flush=True)
# If the actor model is not specified, then exit
if actor_model == '':
print(f"Didn't specify model file. Exiting.", flush=True)
sys.exit(0)
# Extract out dimensions of observation and action spaces
obs_dim = env.observation_space.shape[0]
if is_discrete:
act_dim = env.action_space.n
else:
act_dim = env.action_space.shape[0] #env.action_space.n #env.action_space.shape[0]
# Build our policy the same way we build our actor model in PPO
policy = FeedForwardActorNN(obs_dim, act_dim,is_discrete)
# Load in the actor model saved by the PPO algorithm
policy.load_state_dict(torch.load(actor_model))
# Evaluate our policy with a separate module, eval_policy, to demonstrate
# that once we are done training the model/policy with ppo.py, we no longer need
# ppo.py since it only contains the training algorithm. The model/policy itself exists
# independently as a binary file that can be loaded in with torch.
eval_policy(policy=policy, env=env, render=True, is_discrete=is_discrete)
|
def __init__(self, env, **hyperparameters):
"""
Initializes the PPO model, including hyperparameters.
Parameters:
env - the environment to train on.
hyperparameters - all extra arguments passed into PPO that should be hyperparameters.
Returns:
None
"""
# Make sure the environment is compatible with our code
assert(type(env.observation_space) == gym.spaces.Box)
# assert(type(env.action_space) == gym.spaces.Box)  # disabled so that discrete action spaces are also accepted
# Initialize hyperparameters for training with PPO
self._init_hyperparameters(hyperparameters)
# Extract environment information
self.env = env
self.obs_dim = env.observation_space.shape[0]
if self.discrete:
self.act_dim = env.action_space.n
else:
self.act_dim = env.action_space.shape[0] #env.action_space.n #env.action_space.shape[0]
# Initialize actor and critic networks
self.actor = FeedForwardActorNN(self.obs_dim, self.act_dim,self.discrete)
actor_model = 'ppo_actorKinematicBicycleGymLane.pth'
policy = FeedForwardActorNN(5, 2,False)
policy.load_state_dict(torch.load(actor_model))
actor_model = policy
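# Note: the pre-trained policy loaded above is only bound to the local variable 'actor_model'
# and is never assigned to self.actor, so it has no effect on training as written; it appears
# to be leftover reference code.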
#print(f'model =========== {self.actor}') # ALG STEP 1
self.critic = FeedForwardCriticNN(self.obs_dim, 1)
#print(f'critic =========== {self.critic}')
# Initialize optimizers for actor and critic
self.actor_optim = Adam(self.actor.parameters(), lr=self.lr)
self.critic_optim = Adam(self.critic.parameters(), lr=self.lr)
# Initialize the covariance matrix used to query the actor for actions
self.cov_var = torch.full(size=(self.act_dim,), fill_value=0.05)
self.cov_mat = torch.diag(self.cov_var)
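# cov_var/cov_mat are only used on the continuous path (MultivariateNormal in get_action and
# evaluate); for discrete environments they are created here but never used.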
self.obs_count = 0
self.index_count = 0
# This logger will help us with printing out summaries of each iteration
self.logger = {
't_so_far': 0, # timesteps so far
'i_so_far': 0, # iterations so far
'batch_lens': [], # episodic lengths in batch
'batch_rews': [], # episodic returns in batch
'batch_infractions': [], # episodic infraction counts in batch
'actor_losses': [], # losses of actor network in current iteration
'actor_network' : 0, # Actor network
}
| 30 | 86 |
"""
The file contains the PPO class to train with.
NOTE: All "ALG STEP"s are following the numbers from the original PPO pseudocode.
It can be found here: https://spinningup.openai.com/en/latest/_images/math/e62a8971472597f4b014c2da064f636ffe365ba3.svg
"""
import gym
import numpy as np
import torch
import torch.nn as nn
from torch.optim import Adam
#For continuous actions
from torch.distributions import MultivariateNormal
#For discrete action_space
from torch.distributions import Categorical
from network import FeedForwardActorNN, FeedForwardCriticNN
import sys
from cbf_clf_helper import clf_control, cbf_control
#Integrating tensorboard
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter()
class PPO:
"""
This is the PPO class we will use as our model in main.py
"""
def __init__(self, env, **hyperparameters):
"""
Initializes the PPO model, including hyperparameters.
Parameters:
env - the environment to train on.
hyperparameters - all extra arguments passed into PPO that should be hyperparameters.
Returns:
None
"""
# Make sure the environment is compatible with our code
assert(type(env.observation_space) == gym.spaces.Box)
# assert(type(env.action_space) == gym.spaces.Box)  # disabled so that discrete action spaces are also accepted
# Initialize hyperparameters for training with PPO
self._init_hyperparameters(hyperparameters)
# Extract environment information
self.env = env
self.obs_dim = env.observation_space.shape[0]
if self.discrete:
self.act_dim = env.action_space.n
else:
self.act_dim = env.action_space.shape[0] #env.action_space.n #env.action_space.shape[0]
# Initialize actor and critic networks
self.actor = FeedForwardActorNN(self.obs_dim, self.act_dim,self.discrete)
actor_model = 'ppo_actorKinematicBicycleGymLane.pth'
policy = FeedForwardActorNN(5, 2,False)
policy.load_state_dict(torch.load(actor_model))
actor_model = policy
#print(f'model =========== {self.actor}') # ALG STEP 1
self.critic = FeedForwardCriticNN(self.obs_dim, 1)
#print(f'critic =========== {self.critic}')
# Initialize optimizers for actor and critic
self.actor_optim = Adam(self.actor.parameters(), lr=self.lr)
self.critic_optim = Adam(self.critic.parameters(), lr=self.lr)
# Initialize the covariance matrix used to query the actor for actions
self.cov_var = torch.full(size=(self.act_dim,), fill_value=0.05)
self.cov_mat = torch.diag(self.cov_var)
self.obs_count = 0
self.index_count = 0
# This logger will help us with printing out summaries of each iteration
self.logger = {
't_so_far': 0, # timesteps so far
'i_so_far': 0, # iterations so far
'batch_lens': [], # episodic lengths in batch
'batch_rews': [], # episodic returns in batch
'batch_infractions': [], # episodic infraction counts in batch
'actor_losses': [], # losses of actor network in current iteration
'actor_network' : 0, # Actor network
}
def learn(self, env_name,failure_observations,subpolicy):
"""
Train the actor and critic networks. Here is where the main PPO algorithm resides.
Parameters:
env_name - string appended to the checkpoint filenames when saving the actor/critic
failure_observations - failure-case data passed through to rollout
subpolicy - if True, checkpoints are saved under the subpolicy filenames
Return:
None
"""
print(f"Learning... Running {self.max_timesteps_per_episode} timesteps per episode, ", end='')
print(f"{self.timesteps_per_batch} timesteps per batch for a total of {self.training_step} iterations")
t_so_far = 0 # Timesteps simulated so far
i_so_far = 0 # Iterations run so far
while i_so_far < self.training_step: # ALG STEP 2
# Autobots, roll out (just kidding, we're collecting our batch simulations here)
batch_obs, batch_acts, batch_log_probs, batch_rtgs, batch_lens = self.rollout(subpolicy,failure_observations) # ALG STEP 3
# Calculate how many timesteps we collected this batch
t_so_far += np.sum(batch_lens)
# Increment the number of iterations
i_so_far += 1
# Logging timesteps so far and iterations so far
self.logger['t_so_far'] = t_so_far
self.logger['i_so_far'] = i_so_far
# Calculate advantage at k-th iteration
V, _ = self.evaluate(batch_obs, batch_acts)
A_k = batch_rtgs - V.detach() # ALG STEP 5
# One of the only tricks I use that isn't in the pseudocode. Normalizing advantages
# isn't theoretically necessary, but in practice it decreases the variance of
# our advantages and makes convergence much more stable and faster. I added this because
# solving some environments was too unstable without it.
A_k = (A_k - A_k.mean()) / (A_k.std() + 1e-10)
# This is the loop where we update our network for some n epochs
for _ in range(self.n_updates_per_iteration): # ALG STEP 6 & 7
# Calculate V_phi and pi_theta(a_t | s_t)
V, curr_log_probs = self.evaluate(batch_obs, batch_acts)
# Calculate the ratio pi_theta(a_t | s_t) / pi_theta_k(a_t | s_t)
# NOTE: we just subtract the logs, which is the same as
# dividing the values and then canceling the log with e^log.
# For why we use log probabilities instead of actual probabilities,
# here's a great explanation:
# https://cs.stackexchange.com/questions/70518/why-do-we-use-the-log-in-gradient-based-reinforcement-algorithms
# TL;DR makes gradient ascent easier behind the scenes.
ratios = torch.exp(curr_log_probs - batch_log_probs)
# Calculate surrogate losses.
#print(f'A_k======================={A_k}')
surr1 = ratios * A_k
#print(f'surr1======================={surr1}')
surr2 = torch.clamp(ratios, 1 - self.clip, 1 + self.clip) * A_k
#print(f'surr2======================={surr2}')
# Calculate actor and critic losses.
# NOTE: we take the negative min of the surrogate losses because we're trying to maximize
# the performance function, but Adam minimizes the loss. So minimizing the negative
# performance function maximizes it.
actor_loss = (-torch.min(surr1, surr2)).mean()
#print(f'actor_loss======================={actor_loss}')
critic_loss = nn.MSELoss()(V, batch_rtgs)
# Calculate gradients and perform backward propagation for actor network
self.actor_optim.zero_grad()
actor_loss.backward(retain_graph=True)
self.actor_optim.step()
# Calculate gradients and perform backward propagation for critic network
self.critic_optim.zero_grad()
critic_loss.backward()
self.critic_optim.step()
# Log actor loss
self.logger['actor_losses'].append(actor_loss.detach())
self.logger['actor_network'] = self.actor
# Print a summary of our training so far
self._log_summary()
# Save our model if it's time
if i_so_far % self.save_freq == 0:
if subpolicy:
torch.save(self.actor.state_dict(), './ppo_actor_subpolicy'+env_name+'.pth')
torch.save(self.critic.state_dict(), './ppo_critic_subpolicy'+env_name+'.pth')
else:
torch.save(self.actor.state_dict(), './ppo_actor'+env_name+'.pth')
torch.save(self.critic.state_dict(), './ppo_critic'+env_name+'.pth')
def rollout(self,subpolicy,failure_observations):
"""
This is where we collect the batch of data
from simulation. Since this is an on-policy algorithm, we'll need to collect a fresh batch
of data each time we iterate the actor/critic networks.
Parameters:
subpolicy - flag passed down from learn
failure_observations - failure-case data passed down from learn (not used directly in this implementation)
Return:
batch_obs - the observations collected this batch. Shape: (number of timesteps, dimension of observation)
batch_acts - the actions collected this batch. Shape: (number of timesteps, dimension of action)
batch_log_probs - the log probabilities of each action taken this batch. Shape: (number of timesteps)
batch_rtgs - the Rewards-To-Go of each timestep in this batch. Shape: (number of timesteps)
batch_lens - the lengths of each episode this batch. Shape: (number of episodes)
"""
# Batch data. For more details, check function header.
batch_obs = []
batch_acts = []
batch_log_probs = []
batch_rews = []
batch_rtgs = []
batch_lens = []
batch_infractions = []
# Episodic data. Keeps track of rewards per episode, will get cleared
# upon each new episode
ep_rews = []
t = 0 # Keeps track of how many timesteps we've run so far this batch
# Keep simulating until we've run more than or equal to specified timesteps per batch
while t < self.timesteps_per_batch:
act_list = []
ep_rews = [] # rewards collected per episode
# Reset the environment. Note that obs is short for observation.
obs = self.env.reset()
#print(f'obs reset ============= {obs}')
done = False
count_infractions = 0
count_infractions_acc = 0
count_infractions_steer = 0
# Run an episode for a maximum of max_timesteps_per_episode timesteps
for ep_t in range(self.max_timesteps_per_episode):
a_predicted_clf = clf_control(self.env.v_ego)
delta, target_id, crosstrack_error = self.env.car.tracker.stanley_control(self.env.x_ego, self.env.y_ego, self.env.yaw_ego, self.env.v_ego, self.env.delta_ego)
# If render is specified, render the environment
if self.render:
self.env.render()
t += 1 # Increment timesteps ran this batch so far
# Track observations in this batch
batch_obs.append(obs)
# Calculate action and make a step in the env.
# Note that rew is short for reward.
if self.discrete:
action, log_prob = self.get_action_discrete(obs)
else:
action, log_prob = self.get_action(obs) #self.get_action_discrete(obs)
#print(f'action chosen =============== {action}')
if(abs(round(float(action[0]),1))<abs(round(float(a_predicted_clf),1))):
count_infractions_acc = count_infractions_acc+1
if(abs(round(float(action[1]),1)) < abs(round(float(delta),1))-0.2):
#print(f'After rounding =============== {round(float(action_net[1]),1)} ====== {round(float(action[1]),1)}')
count_infractions_steer = count_infractions_steer+1
obs, rew, done, info = self.env.step(action)
count_infractions = count_infractions_acc+count_infractions_steer
# Track recent reward, action, and action log probability
ep_rews.append(rew)
batch_acts.append(action)
batch_log_probs.append(log_prob)
act_list.append(info)
# If the environment tells us the episode is terminated, break
if done:
break
# Track episodic lengths and rewards
#self.env.render(act_list)
batch_lens.append(ep_t + 1)
batch_rews.append(ep_rews)
batch_infractions.append(count_infractions)
# Reshape data as tensors in the shape specified in function description, before returning
batch_obs = torch.tensor(batch_obs, dtype=torch.float)
#print(f'batch_acts =============== {batch_acts}')
#For discrete state space
if self.discrete:
batch_acts = torch.tensor(batch_acts, dtype=torch.long).view(-1,)
else:
batch_acts = torch.tensor(batch_acts, dtype=torch.float) #torch.tensor(batch_acts, dtype=torch.long).view(-1,)
#print(f'batch_acts =============== {batch_acts}')
batch_log_probs = torch.tensor(batch_log_probs, dtype=torch.float)
batch_rtgs = self.compute_rtgs(batch_rews) # ALG STEP 4
# Log the episodic returns and episodic lengths in this batch.
self.logger['batch_rews'] = batch_rews
self.logger['batch_lens'] = batch_lens
self.logger['batch_infractions'] = batch_infractions
return batch_obs, batch_acts, batch_log_probs, batch_rtgs, batch_lens
def compute_rtgs(self, batch_rews):
"""
Compute the Reward-To-Go of each timestep in a batch given the rewards.
Parameters:
batch_rews - the rewards in a batch, Shape: (number of episodes, number of timesteps per episode)
Return:
batch_rtgs - the rewards to go, Shape: (number of timesteps in batch)
"""
# The rewards-to-go (rtg) per episode per batch to return.
# The shape will be (num timesteps per episode)
batch_rtgs = []
# Iterate through each episode
for ep_rews in reversed(batch_rews):
discounted_reward = 0 # The discounted reward so far
# Iterate through all rewards in the episode. We go backwards for smoother calculation of each
# discounted return (think about why it would be harder starting from the beginning)
for rew in reversed(ep_rews):
discounted_reward = rew + discounted_reward * self.gamma
batch_rtgs.insert(0, discounted_reward)
# Convert the rewards-to-go into a tensor
batch_rtgs = torch.tensor(batch_rtgs, dtype=torch.float)
return batch_rtgs
# Probability sampling for discrete actions
def get_action_discrete(self, obs):
#print(f'obs ================== {obs}')
mean = self.actor(obs)
#print(f'mean ================== {mean}')
dist = Categorical(mean)
#print(f'dist ================== {dist}')
action = dist.sample()
log_prob = dist.log_prob(action)
#print(f'action ====== {action} ========= {log_prob}')
return action.detach().numpy().item(), log_prob.detach().item()
def get_action(self, obs):
"""
Queries an action from the actor network, should be called from rollout.
Parameters:
obs - the observation at the current timestep
Return:
action - the action to take, as a numpy array
log_prob - the log probability of the selected action in the distribution
"""
# Query the actor network for a mean action
mean = self.actor(obs)
# Create a distribution with the mean action and std from the covariance matrix above.
# For more information on how this distribution works, check out Andrew Ng's lecture on it:
# https://www.youtube.com/watch?v=JjB58InuTqM
dist = MultivariateNormal(mean, self.cov_mat)
# Sample an action from the distribution
action = dist.sample()
# Calculate the log probability for that action
log_prob = dist.log_prob(action)
# Return the sampled action and the log probability of that action in our distribution
return action.detach().numpy(), log_prob.detach()
def evaluate(self, batch_obs, batch_acts):
"""
Estimate the values of each observation, and the log probs of
each action in the most recent batch with the most recent
iteration of the actor network. Should be called from learn.
Parameters:
batch_obs - the observations from the most recently collected batch as a tensor.
Shape: (number of timesteps in batch, dimension of observation)
batch_acts - the actions from the most recently collected batch as a tensor.
Shape: (number of timesteps in batch, dimension of action)
Return:
V - the predicted values of batch_obs
log_probs - the log probabilities of the actions taken in batch_acts given batch_obs
"""
# Query critic network for a value V for each batch_obs. Shape of V should be same as batch_rtgs
V = self.critic(batch_obs).squeeze()
# Calculate the log probabilities of batch actions using most recent actor network.
# This segment of code is similar to that in get_action()
mean = self.actor(batch_obs)
if self.discrete:
dist = Categorical(mean)
else:
dist = MultivariateNormal(mean, self.cov_mat)
#For discrete actions
#dist = Categorical(mean)
log_probs = dist.log_prob(batch_acts)
# Return the value vector V of each observation in the batch
# and log probabilities log_probs of each action in the batch
return V, log_probs
def _init_hyperparameters(self, hyperparameters):
"""
Initialize default and custom values for hyperparameters
Parameters:
hyperparameters - the extra arguments included when creating the PPO model, should only include
hyperparameters defined below with custom values.
Return:
None
"""
# Initialize default values for hyperparameters
# Algorithm hyperparameters
self.timesteps_per_batch = 4800 # Number of timesteps to run per batch
self.max_timesteps_per_episode = 1600 # Max number of timesteps per episode
self.n_updates_per_iteration = 5 # Number of times to update actor/critic per iteration
self.lr = 0.005 # Learning rate of actor optimizer
self.gamma = 0.95 # Discount factor to be applied when calculating Rewards-To-Go
self.clip = 0.2 # Recommended 0.2, helps define the threshold to clip the ratio during SGA
# Miscellaneous parameters
self.render = False # If we should render during rollout
self.save_freq = 10 # How often we save in number of iterations
self.seed = None # Sets the seed of our program, used for reproducibility of results
self.discrete = False # Sets the type of environment to discrete or continuous
self.training_step = 200 # Sets the number of training iterations
# Change any default values to custom values for specified hyperparameters
for param, val in hyperparameters.items():
setattr(self, param, val)  # safer and more robust than exec('self.' + param + ' = ' + str(val))
# Sets the seed if specified
if self.seed != None:
# Check if our seed is valid first
assert(type(self.seed) == int)
# Set the seed
torch.manual_seed(self.seed)
print(f"Successfully set seed to {self.seed}")
def _log_summary(self):
"""
Print to stdout what we've logged so far in the most recent batch.
Parameters:
None
Return:
None
"""
# Calculate logging values. I use a few python shortcuts to calculate each value
# without explaining since it's not too important to PPO; feel free to look it over,
# and if you have any questions you can email me (look at bottom of README)
t_so_far = self.logger['t_so_far']
i_so_far = self.logger['i_so_far']
avg_ep_lens = np.mean(self.logger['batch_lens'])
avg_ep_rews = np.mean([np.sum(ep_rews) for ep_rews in self.logger['batch_rews']])
avg_actor_loss = np.mean([losses.float().mean() for losses in self.logger['actor_losses']])
avg_ep_infractions = np.mean([np.sum(ep_inf) for ep_inf in self.logger['batch_infractions']])
actor_model = self.logger['actor_network']
# Round decimal places for more aesthetic logging messages
avg_ep_lens = str(round(avg_ep_lens, 2))
avg_ep_rews = str(round(avg_ep_rews, 2))
avg_ep_infractions = str(round(avg_ep_infractions, 2))
avg_actor_loss = str(round(avg_actor_loss, 5))
writer.add_scalar("Average Episodic Return", int(float(avg_ep_rews)), t_so_far)
writer.add_scalar("Average actor Loss", int(float(avg_actor_loss)), t_so_far)
writer.add_scalar("Average Infractions", int(float(avg_ep_infractions)), t_so_far)
# Tracking the weight of the network
for name, param in actor_model.named_parameters():
if 'weight' in name:
writer.add_histogram(name, param.detach().numpy(), t_so_far)
# Print logging statements
print(flush=True)
print(f"-------------------- Iteration #{i_so_far} --------------------", flush=True)
print(f"Average Episodic Length: {avg_ep_lens}", flush=True)
print(f"Average Episodic Return: {avg_ep_rews}", flush=True)
print(f"Average Episodic Infractions : {avg_ep_infractions}", flush=True)
print(f"Average Loss: {avg_actor_loss}", flush=True)
print(f"Timesteps So Far: {t_so_far}", flush=True)
print(f"------------------------------------------------------", flush=True)
print(flush=True)
# Reset batch-specific logging data
self.logger['batch_lens'] = []
self.logger['batch_rews'] = []
self.logger['actor_losses'] = []
def test(env, actor_model, is_discrete):
"""
Tests the model.
Parameters:
env - the environment to test the policy on
actor_model - the actor model to load in
is_discrete - whether the environment's action space is discrete
Return:
None
"""
print(f"Testing {actor_model}", flush=True)
# If the actor model is not specified, then exit
if actor_model == '':
print(f"Didn't specify model file. Exiting.", flush=True)
sys.exit(0)
# Extract out dimensions of observation and action spaces
obs_dim = env.observation_space.shape[0]
if is_discrete:
act_dim = env.action_space.n
else:
act_dim = env.action_space.shape[0] #env.action_space.n #env.action_space.shape[0]
# Build our policy the same way we build our actor model in PPO
policy = FeedForwardActorNN(obs_dim, act_dim,is_discrete)
# Load in the actor model saved by the PPO algorithm
policy.load_state_dict(torch.load(actor_model))
# Evaluate our policy with a separate module, eval_policy, to demonstrate
# that once we are done training the model/policy with ppo.py, we no longer need
# ppo.py since it only contains the training algorithm. The model/policy itself exists
# independently as a binary file that can be loaded in with torch.
eval_policy(policy=policy, env=env, render=True, is_discrete=is_discrete)
|
compute_rtgs
|
Compute the Reward-To-Go of each timestep in a batch given the rewards.
Parameters:
batch_rews - the rewards in a batch, Shape: (number of episodes, number of timesteps per episode)
Return:
batch_rtgs - the rewards to go, Shape: (number of timesteps in batch)
|
"""
The file contains the PPO class to train with.
NOTE: All "ALG STEP"s are following the numbers from the original PPO pseudocode.
It can be found here: https://spinningup.openai.com/en/latest/_images/math/e62a8971472597f4b014c2da064f636ffe365ba3.svg
"""
import gym
import numpy as np
import torch
import torch.nn as nn
from torch.optim import Adam
#For continuous actions
from torch.distributions import MultivariateNormal
#For discrete action_space
from torch.distributions import Categorical
from network import FeedForwardActorNN, FeedForwardCriticNN
import sys
from cbf_clf_helper import clf_control, cbf_control
#Integrating tensorboard
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter()
class PPO:
"""
This is the PPO class we will use as our model in main.py
"""
def __init__(self, env, **hyperparameters):
"""
Initializes the PPO model, including hyperparameters.
Parameters:
env - the environment to train on.
hyperparameters - all extra arguments passed into PPO that should be hyperparameters.
Returns:
None
"""
# Make sure the environment is compatible with our code
assert(type(env.observation_space) == gym.spaces.Box)
# assert(type(env.action_space) == gym.spaces.Box)  # disabled so that discrete action spaces are also accepted
# Initialize hyperparameters for training with PPO
self._init_hyperparameters(hyperparameters)
# Extract environment information
self.env = env
self.obs_dim = env.observation_space.shape[0]
if self.discrete:
self.act_dim = env.action_space.n
else:
self.act_dim = env.action_space.shape[0] #env.action_space.n #env.action_space.shape[0]
# Initialize actor and critic networks
self.actor = FeedForwardActorNN(self.obs_dim, self.act_dim,self.discrete)
actor_model = 'ppo_actorKinematicBicycleGymLane.pth'
policy = FeedForwardActorNN(5, 2,False)
policy.load_state_dict(torch.load(actor_model))
actor_model = policy
#print(f'model =========== {self.actor}') # ALG STEP 1
self.critic = FeedForwardCriticNN(self.obs_dim, 1)
#print(f'critic =========== {self.critic}')
# Initialize optimizers for actor and critic
self.actor_optim = Adam(self.actor.parameters(), lr=self.lr)
self.critic_optim = Adam(self.critic.parameters(), lr=self.lr)
# Initialize the covariance matrix used to query the actor for actions
self.cov_var = torch.full(size=(self.act_dim,), fill_value=0.05)
self.cov_mat = torch.diag(self.cov_var)
self.obs_count = 0
self.index_count = 0
# This logger will help us with printing out summaries of each iteration
self.logger = {
't_so_far': 0, # timesteps so far
'i_so_far': 0, # iterations so far
'batch_lens': [], # episodic lengths in batch
'batch_rews': [], # episodic returns in batch
'batch_infractions': [], # episodic infraction counts in batch
'actor_losses': [], # losses of actor network in current iteration
'actor_network' : 0, # Actor network
}
def learn(self, env_name,failure_observations,subpolicy):
"""
Train the actor and critic networks. Here is where the main PPO algorithm resides.
Parameters:
env_name - string appended to the checkpoint filenames when saving the actor/critic
failure_observations - failure-case data passed through to rollout
subpolicy - if True, checkpoints are saved under the subpolicy filenames
Return:
None
"""
print(f"Learning... Running {self.max_timesteps_per_episode} timesteps per episode, ", end='')
print(f"{self.timesteps_per_batch} timesteps per batch for a total of {self.training_step} iterations")
t_so_far = 0 # Timesteps simulated so far
i_so_far = 0 # Iterations run so far
while i_so_far < self.training_step: # ALG STEP 2
# Autobots, roll out (just kidding, we're collecting our batch simulations here)
batch_obs, batch_acts, batch_log_probs, batch_rtgs, batch_lens = self.rollout(subpolicy,failure_observations) # ALG STEP 3
# Calculate how many timesteps we collected this batch
t_so_far += np.sum(batch_lens)
# Increment the number of iterations
i_so_far += 1
# Logging timesteps so far and iterations so far
self.logger['t_so_far'] = t_so_far
self.logger['i_so_far'] = i_so_far
# Calculate advantage at k-th iteration
V, _ = self.evaluate(batch_obs, batch_acts)
A_k = batch_rtgs - V.detach() # ALG STEP 5
# One of the only tricks I use that isn't in the pseudocode. Normalizing advantages
# isn't theoretically necessary, but in practice it decreases the variance of
# our advantages and makes convergence much more stable and faster. I added this because
# solving some environments was too unstable without it.
A_k = (A_k - A_k.mean()) / (A_k.std() + 1e-10)
# This is the loop where we update our network for some n epochs
for _ in range(self.n_updates_per_iteration): # ALG STEP 6 & 7
# Calculate V_phi and pi_theta(a_t | s_t)
V, curr_log_probs = self.evaluate(batch_obs, batch_acts)
# Calculate the ratio pi_theta(a_t | s_t) / pi_theta_k(a_t | s_t)
# NOTE: we just subtract the logs, which is the same as
# dividing the values and then canceling the log with e^log.
# For why we use log probabilities instead of actual probabilities,
# here's a great explanation:
# https://cs.stackexchange.com/questions/70518/why-do-we-use-the-log-in-gradient-based-reinforcement-algorithms
# TL;DR makes gradient ascent easier behind the scenes.
ratios = torch.exp(curr_log_probs - batch_log_probs)
# Calculate surrogate losses.
#print(f'A_k======================={A_k}')
surr1 = ratios * A_k
#print(f'surr1======================={surr1}')
surr2 = torch.clamp(ratios, 1 - self.clip, 1 + self.clip) * A_k
#print(f'surr2======================={surr2}')
# Calculate actor and critic losses.
# NOTE: we take the negative min of the surrogate losses because we're trying to maximize
# the performance function, but Adam minimizes the loss. So minimizing the negative
# performance function maximizes it.
actor_loss = (-torch.min(surr1, surr2)).mean()
#print(f'actor_loss======================={actor_loss}')
critic_loss = nn.MSELoss()(V, batch_rtgs)
# Calculate gradients and perform backward propagation for actor network
self.actor_optim.zero_grad()
actor_loss.backward(retain_graph=True)
self.actor_optim.step()
# Calculate gradients and perform backward propagation for critic network
self.critic_optim.zero_grad()
critic_loss.backward()
self.critic_optim.step()
# Log actor loss
self.logger['actor_losses'].append(actor_loss.detach())
self.logger['actor_network'] = self.actor
# Print a summary of our training so far
self._log_summary()
# Save our model if it's time
if i_so_far % self.save_freq == 0:
if subpolicy:
torch.save(self.actor.state_dict(), './ppo_actor_subpolicy'+env_name+'.pth')
torch.save(self.critic.state_dict(), './ppo_critic_subpolicy'+env_name+'.pth')
else:
torch.save(self.actor.state_dict(), './ppo_actor'+env_name+'.pth')
torch.save(self.critic.state_dict(), './ppo_critic'+env_name+'.pth')
def rollout(self,subpolicy,failure_observations):
"""
This is where we collect the batch of data
from simulation. Since this is an on-policy algorithm, we'll need to collect a fresh batch
of data each time we iterate the actor/critic networks.
Parameters:
subpolicy - flag passed down from learn
failure_observations - failure-case data passed down from learn (not used directly in this implementation)
Return:
batch_obs - the observations collected this batch. Shape: (number of timesteps, dimension of observation)
batch_acts - the actions collected this batch. Shape: (number of timesteps, dimension of action)
batch_log_probs - the log probabilities of each action taken this batch. Shape: (number of timesteps)
batch_rtgs - the Rewards-To-Go of each timestep in this batch. Shape: (number of timesteps)
batch_lens - the lengths of each episode this batch. Shape: (number of episodes)
"""
# Batch data. For more details, check function header.
batch_obs = []
batch_acts = []
batch_log_probs = []
batch_rews = []
batch_rtgs = []
batch_lens = []
batch_infractions = []
# Episodic data. Keeps track of rewards per episode, will get cleared
# upon each new episode
ep_rews = []
t = 0 # Keeps track of how many timesteps we've run so far this batch
# Keep simulating until we've run more than or equal to specified timesteps per batch
while t < self.timesteps_per_batch:
act_list = []
ep_rews = [] # rewards collected per episode
# Reset the environment. Note that obs is short for observation.
obs = self.env.reset()
#print(f'obs reset ============= {obs}')
done = False
count_infractions = 0
count_infractions_acc = 0
count_infractions_steer = 0
# Run an episode for a maximum of max_timesteps_per_episode timesteps
for ep_t in range(self.max_timesteps_per_episode):
a_predicted_clf = clf_control(self.env.v_ego)
delta, target_id, crosstrack_error = self.env.car.tracker.stanley_control(self.env.x_ego, self.env.y_ego, self.env.yaw_ego, self.env.v_ego, self.env.delta_ego)
# If render is specified, render the environment
if self.render:
self.env.render()
t += 1 # Increment timesteps ran this batch so far
# Track observations in this batch
batch_obs.append(obs)
# Calculate action and make a step in the env.
# Note that rew is short for reward.
if self.discrete:
action, log_prob = self.get_action_discrete(obs)
else:
action, log_prob = self.get_action(obs) #self.get_action_discrete(obs)
#print(f'action chosen =============== {action}')
if(abs(round(float(action[0]),1))<abs(round(float(a_predicted_clf),1))):
count_infractions_acc = count_infractions_acc+1
if(abs(round(float(action[1]),1)) < abs(round(float(delta),1))-0.2):
#print(f'After rounding =============== {round(float(action_net[1]),1)} ====== {round(float(action[1]),1)}')
count_infractions_steer = count_infractions_steer+1
obs, rew, done, info = self.env.step(action)
count_infractions = count_infractions_acc+count_infractions_steer
# Track recent reward, action, and action log probability
ep_rews.append(rew)
batch_acts.append(action)
batch_log_probs.append(log_prob)
act_list.append(info)
# If the environment tells us the episode is terminated, break
if done:
break
# Track episodic lengths and rewards
#self.env.render(act_list)
batch_lens.append(ep_t + 1)
batch_rews.append(ep_rews)
batch_infractions.append(count_infractions)
# Reshape data as tensors in the shape specified in function description, before returning
batch_obs = torch.tensor(batch_obs, dtype=torch.float)
#print(f'batch_acts =============== {batch_acts}')
#For discrete state space
if self.discrete:
batch_acts = torch.tensor(batch_acts, dtype=torch.long).view(-1,)
else:
batch_acts = torch.tensor(batch_acts, dtype=torch.float) #torch.tensor(batch_acts, dtype=torch.long).view(-1,)
#print(f'batch_acts =============== {batch_acts}')
batch_log_probs = torch.tensor(batch_log_probs, dtype=torch.float)
batch_rtgs = self.compute_rtgs(batch_rews) # ALG STEP 4
# Log the episodic returns and episodic lengths in this batch.
self.logger['batch_rews'] = batch_rews
self.logger['batch_lens'] = batch_lens
self.logger['batch_infractions'] = batch_infractions
return batch_obs, batch_acts, batch_log_probs, batch_rtgs, batch_lens
# MASKED: compute_rtgs function (lines 288-316)
# Probability sampling for discrete actions
def get_action_discrete(self, obs):
#print(f'obs ================== {obs}')
mean = self.actor(obs)
#print(f'mean ================== {mean}')
dist = Categorical(mean)
#print(f'dist ================== {dist}')
action = dist.sample()
log_prob = dist.log_prob(action)
#print(f'action ====== {action} ========= {log_prob}')
return action.detach().numpy().item(), log_prob.detach().item()
def get_action(self, obs):
"""
Queries an action from the actor network, should be called from rollout.
Parameters:
obs - the observation at the current timestep
Return:
action - the action to take, as a numpy array
log_prob - the log probability of the selected action in the distribution
"""
# Query the actor network for a mean action
mean = self.actor(obs)
# Create a distribution with the mean action and std from the covariance matrix above.
# For more information on how this distribution works, check out Andrew Ng's lecture on it:
# https://www.youtube.com/watch?v=JjB58InuTqM
dist = MultivariateNormal(mean, self.cov_mat)
# Sample an action from the distribution
action = dist.sample()
# Calculate the log probability for that action
log_prob = dist.log_prob(action)
# Return the sampled action and the log probability of that action in our distribution
return action.detach().numpy(), log_prob.detach()
def evaluate(self, batch_obs, batch_acts):
"""
Estimate the values of each observation, and the log probs of
each action in the most recent batch with the most recent
iteration of the actor network. Should be called from learn.
Parameters:
batch_obs - the observations from the most recently collected batch as a tensor.
Shape: (number of timesteps in batch, dimension of observation)
batch_acts - the actions from the most recently collected batch as a tensor.
Shape: (number of timesteps in batch, dimension of action)
Return:
V - the predicted values of batch_obs
log_probs - the log probabilities of the actions taken in batch_acts given batch_obs
"""
# Query critic network for a value V for each batch_obs. Shape of V should be same as batch_rtgs
V = self.critic(batch_obs).squeeze()
# Calculate the log probabilities of batch actions using most recent actor network.
# This segment of code is similar to that in get_action()
mean = self.actor(batch_obs)
if self.discrete:
dist = Categorical(mean)
else:
dist = MultivariateNormal(mean, self.cov_mat)
#For discrete actions
#dist = Categorical(mean)
log_probs = dist.log_prob(batch_acts)
# Return the value vector V of each observation in the batch
# and log probabilities log_probs of each action in the batch
return V, log_probs
def _init_hyperparameters(self, hyperparameters):
"""
Initialize default and custom values for hyperparameters
Parameters:
hyperparameters - the extra arguments included when creating the PPO model, should only include
hyperparameters defined below with custom values.
Return:
None
"""
# Initialize default values for hyperparameters
# Algorithm hyperparameters
self.timesteps_per_batch = 4800 # Number of timesteps to run per batch
self.max_timesteps_per_episode = 1600 # Max number of timesteps per episode
self.n_updates_per_iteration = 5 # Number of times to update actor/critic per iteration
self.lr = 0.005 # Learning rate of actor optimizer
self.gamma = 0.95 # Discount factor to be applied when calculating Rewards-To-Go
self.clip = 0.2 # Recommended 0.2, helps define the threshold to clip the ratio during SGA
# Miscellaneous parameters
self.render = False # If we should render during rollout
self.save_freq = 10 # How often we save in number of iterations
self.seed = None # Sets the seed of our program, used for reproducibility of results
self.discrete = False # Sets the type of environment to discrete or continuous
		self.training_step = 200 # Sets the number of training steps
		# Change any default values to custom values for specified hyperparameters
		for param, val in hyperparameters.items():
			setattr(self, param, val) # safer than exec()-based string assembly; also works for non-numeric values
		# Sets the seed if specified
		if self.seed is not None:
# Check if our seed is valid first
assert(type(self.seed) == int)
# Set the seed
torch.manual_seed(self.seed)
print(f"Successfully set seed to {self.seed}")
def _log_summary(self):
"""
Print to stdout what we've logged so far in the most recent batch.
Parameters:
None
Return:
None
"""
# Calculate logging values. I use a few python shortcuts to calculate each value
# without explaining since it's not too important to PPO; feel free to look it over,
# and if you have any questions you can email me (look at bottom of README)
t_so_far = self.logger['t_so_far']
i_so_far = self.logger['i_so_far']
avg_ep_lens = np.mean(self.logger['batch_lens'])
avg_ep_rews = np.mean([np.sum(ep_rews) for ep_rews in self.logger['batch_rews']])
avg_actor_loss = np.mean([losses.float().mean() for losses in self.logger['actor_losses']])
avg_ep_infractions = np.mean([np.sum(ep_inf) for ep_inf in self.logger['batch_infractions']])
actor_model = self.logger['actor_network']
# Round decimal places for more aesthetic logging messages
avg_ep_lens = str(round(avg_ep_lens, 2))
avg_ep_rews = str(round(avg_ep_rews, 2))
avg_ep_infractions = str(round(avg_ep_infractions, 2))
avg_actor_loss = str(round(avg_actor_loss, 5))
writer.add_scalar("Average Episodic Return", int(float(avg_ep_rews)), t_so_far)
writer.add_scalar("Average actor Loss", int(float(avg_actor_loss)), t_so_far)
writer.add_scalar("Average Infractions", int(float(avg_ep_infractions)), t_so_far)
# Tracking the weight of the network
for name, param in actor_model.named_parameters():
if 'weight' in name:
writer.add_histogram(name, param.detach().numpy(), t_so_far)
# Print logging statements
print(flush=True)
print(f"-------------------- Iteration #{i_so_far} --------------------", flush=True)
print(f"Average Episodic Length: {avg_ep_lens}", flush=True)
print(f"Average Episodic Return: {avg_ep_rews}", flush=True)
print(f"Average Episodic Infractions : {avg_ep_infractions}", flush=True)
print(f"Average Loss: {avg_actor_loss}", flush=True)
print(f"Timesteps So Far: {t_so_far}", flush=True)
print(f"------------------------------------------------------", flush=True)
print(flush=True)
# Reset batch-specific logging data
self.logger['batch_lens'] = []
self.logger['batch_rews'] = []
self.logger['actor_losses'] = []
def test(env, actor_model, is_discrete):
"""
Tests the model.
Parameters:
env - the environment to test the policy on
			actor_model - the actor model to load in
			is_discrete - whether the environment's action space is discrete
Return:
None
"""
print(f"Testing {actor_model}", flush=True)
# If the actor model is not specified, then exit
if actor_model == '':
print(f"Didn't specify model file. Exiting.", flush=True)
sys.exit(0)
# Extract out dimensions of observation and action spaces
obs_dim = env.observation_space.shape[0]
if is_discrete:
act_dim = env.action_space.n
else:
act_dim = env.action_space.shape[0] #env.action_space.n #env.action_space.shape[0]
# Build our policy the same way we build our actor model in PPO
policy = FeedForwardActorNN(obs_dim, act_dim,is_discrete)
# Load in the actor model saved by the PPO algorithm
policy.load_state_dict(torch.load(actor_model))
# Evaluate our policy with a separate module, eval_policy, to demonstrate
# that once we are done training the model/policy with ppo.py, we no longer need
# ppo.py since it only contains the training algorithm. The model/policy itself exists
# independently as a binary file that can be loaded in with torch.
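	# NOTE: eval_policy is not imported in this file; it is assumed to be provided by a companion
	# module (e.g. `from eval_policy import eval_policy`) before test() is called.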
eval_policy(policy=policy, env=env, render=True, is_discrete=is_discrete)
|
def compute_rtgs(self, batch_rews):
"""
Compute the Reward-To-Go of each timestep in a batch given the rewards.
Parameters:
batch_rews - the rewards in a batch, Shape: (number of episodes, number of timesteps per episode)
Return:
batch_rtgs - the rewards to go, Shape: (number of timesteps in batch)
"""
# The rewards-to-go (rtg) per episode per batch to return.
		# The shape will be (number of timesteps in batch)
batch_rtgs = []
# Iterate through each episode
for ep_rews in reversed(batch_rews):
discounted_reward = 0 # The discounted reward so far
# Iterate through all rewards in the episode. We go backwards for smoother calculation of each
# discounted return (think about why it would be harder starting from the beginning)
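			# Worked example (illustrative): with gamma = 0.9 and ep_rews = [1, 2, 3] the loop below
			# yields rewards-to-go [1 + 0.9*(2 + 0.9*3), 2 + 0.9*3, 3] = [5.23, 4.7, 3.0]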
for rew in reversed(ep_rews):
discounted_reward = rew + discounted_reward * self.gamma
batch_rtgs.insert(0, discounted_reward)
# Convert the rewards-to-go into a tensor
batch_rtgs = torch.tensor(batch_rtgs, dtype=torch.float)
return batch_rtgs
| 288 | 316 |
"""
The file contains the PPO class to train with.
NOTE: All "ALG STEP"s are following the numbers from the original PPO pseudocode.
It can be found here: https://spinningup.openai.com/en/latest/_images/math/e62a8971472597f4b014c2da064f636ffe365ba3.svg
"""
import gym
import numpy as np
import torch
import torch.nn as nn
from torch.optim import Adam
#For continuous actions
from torch.distributions import MultivariateNormal
#For discrete action_space
from torch.distributions import Categorical
from network import FeedForwardActorNN, FeedForwardCriticNN
import sys
from cbf_clf_helper import clf_control, cbf_control
#Integrating tensorboard
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter()
class PPO:
"""
This is the PPO class we will use as our model in main.py
"""
def __init__(self, env, **hyperparameters):
"""
Initializes the PPO model, including hyperparameters.
Parameters:
env - the environment to train on.
hyperparameters - all extra arguments passed into PPO that should be hyperparameters.
Returns:
None
"""
# Make sure the environment is compatible with our code
assert(type(env.observation_space) == gym.spaces.Box)
		# assert(type(env.action_space) == gym.spaces.Box) # disabled: the action space may be Discrete or Box here
# Initialize hyperparameters for training with PPO
self._init_hyperparameters(hyperparameters)
# Extract environment information
self.env = env
self.obs_dim = env.observation_space.shape[0]
if self.discrete:
self.act_dim = env.action_space.n
else:
self.act_dim = env.action_space.shape[0] #env.action_space.n #env.action_space.shape[0]
# Initialize actor and critic networks
self.actor = FeedForwardActorNN(self.obs_dim, self.act_dim,self.discrete)
actor_model = 'ppo_actorKinematicBicycleGymLane.pth'
policy = FeedForwardActorNN(5, 2,False)
policy.load_state_dict(torch.load(actor_model))
actor_model = policy
#print(f'model =========== {self.actor}') # ALG STEP 1
self.critic = FeedForwardCriticNN(self.obs_dim, 1)
#print(f'critic =========== {self.critic}')
# Initialize optimizers for actor and critic
self.actor_optim = Adam(self.actor.parameters(), lr=self.lr)
self.critic_optim = Adam(self.critic.parameters(), lr=self.lr)
# Initialize the covariance matrix used to query the actor for actions
self.cov_var = torch.full(size=(self.act_dim,), fill_value=0.05)
self.cov_mat = torch.diag(self.cov_var)
self.obs_count = 0
self.index_count = 0
# This logger will help us with printing out summaries of each iteration
self.logger = {
't_so_far': 0, # timesteps so far
'i_so_far': 0, # iterations so far
'batch_lens': [], # episodic lengths in batch
'batch_rews': [], # episodic returns in batch
			'batch_infractions': [], # episodic infraction counts in batch
'actor_losses': [], # losses of actor network in current iteration
'actor_network' : 0, # Actor network
}
def learn(self, env_name,failure_observations,subpolicy):
"""
Train the actor and critic networks. Here is where the main PPO algorithm resides.
Parameters:
				env_name - name appended to the saved actor/critic checkpoint filenames
				failure_observations - passed through to rollout (not used by rollout in this version)
				subpolicy - if True, checkpoints are saved under the sub-policy filenames
Return:
None
"""
print(f"Learning... Running {self.max_timesteps_per_episode} timesteps per episode, ", end='')
print(f"{self.timesteps_per_batch} timesteps per batch for a total of {self.training_step} iterations")
t_so_far = 0 # Timesteps simulated so far
		i_so_far = 0 # Iterations run so far
while i_so_far < self.training_step: # ALG STEP 2
# Autobots, roll out (just kidding, we're collecting our batch simulations here)
batch_obs, batch_acts, batch_log_probs, batch_rtgs, batch_lens = self.rollout(subpolicy,failure_observations) # ALG STEP 3
# Calculate how many timesteps we collected this batch
t_so_far += np.sum(batch_lens)
# Increment the number of iterations
i_so_far += 1
# Logging timesteps so far and iterations so far
self.logger['t_so_far'] = t_so_far
self.logger['i_so_far'] = i_so_far
# Calculate advantage at k-th iteration
V, _ = self.evaluate(batch_obs, batch_acts)
A_k = batch_rtgs - V.detach() # ALG STEP 5
# One of the only tricks I use that isn't in the pseudocode. Normalizing advantages
# isn't theoretically necessary, but in practice it decreases the variance of
# our advantages and makes convergence much more stable and faster. I added this because
# solving some environments was too unstable without it.
A_k = (A_k - A_k.mean()) / (A_k.std() + 1e-10)
# This is the loop where we update our network for some n epochs
for _ in range(self.n_updates_per_iteration): # ALG STEP 6 & 7
# Calculate V_phi and pi_theta(a_t | s_t)
V, curr_log_probs = self.evaluate(batch_obs, batch_acts)
# Calculate the ratio pi_theta(a_t | s_t) / pi_theta_k(a_t | s_t)
# NOTE: we just subtract the logs, which is the same as
# dividing the values and then canceling the log with e^log.
# For why we use log probabilities instead of actual probabilities,
# here's a great explanation:
# https://cs.stackexchange.com/questions/70518/why-do-we-use-the-log-in-gradient-based-reinforcement-algorithms
# TL;DR makes gradient ascent easier behind the scenes.
ratios = torch.exp(curr_log_probs - batch_log_probs)
# Calculate surrogate losses.
#print(f'A_k======================={A_k}')
surr1 = ratios * A_k
#print(f'surr1======================={surr1}')
surr2 = torch.clamp(ratios, 1 - self.clip, 1 + self.clip) * A_k
#print(f'surr2======================={surr2}')
# Calculate actor and critic losses.
# NOTE: we take the negative min of the surrogate losses because we're trying to maximize
# the performance function, but Adam minimizes the loss. So minimizing the negative
# performance function maximizes it.
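				# In PPO-Clip notation: L^CLIP = E_t[ min(r_t * A_t, clip(r_t, 1 - eps, 1 + eps) * A_t) ]
				# with eps = self.clip, r_t = ratios and A_t = A_k; its negative mean is the loss minimized below.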
actor_loss = (-torch.min(surr1, surr2)).mean()
#print(f'actor_loss======================={actor_loss}')
critic_loss = nn.MSELoss()(V, batch_rtgs)
# Calculate gradients and perform backward propagation for actor network
self.actor_optim.zero_grad()
actor_loss.backward(retain_graph=True)
self.actor_optim.step()
# Calculate gradients and perform backward propagation for critic network
self.critic_optim.zero_grad()
critic_loss.backward()
self.critic_optim.step()
# Log actor loss
self.logger['actor_losses'].append(actor_loss.detach())
self.logger['actor_network'] = self.actor
# Print a summary of our training so far
self._log_summary()
# Save our model if it's time
if i_so_far % self.save_freq == 0:
if subpolicy:
torch.save(self.actor.state_dict(), './ppo_actor_subpolicy'+env_name+'.pth')
torch.save(self.critic.state_dict(), './ppo_critic_subpolicy'+env_name+'.pth')
else:
torch.save(self.actor.state_dict(), './ppo_actor'+env_name+'.pth')
torch.save(self.critic.state_dict(), './ppo_critic'+env_name+'.pth')
def rollout(self,subpolicy,failure_observations):
"""
This is where we collect the batch of data
from simulation. Since this is an on-policy algorithm, we'll need to collect a fresh batch
of data each time we iterate the actor/critic networks.
Parameters:
				subpolicy - passed in from learn (not used while collecting data in this version)
				failure_observations - passed in from learn (not used while collecting data in this version)
Return:
batch_obs - the observations collected this batch. Shape: (number of timesteps, dimension of observation)
batch_acts - the actions collected this batch. Shape: (number of timesteps, dimension of action)
batch_log_probs - the log probabilities of each action taken this batch. Shape: (number of timesteps)
batch_rtgs - the Rewards-To-Go of each timestep in this batch. Shape: (number of timesteps)
batch_lens - the lengths of each episode this batch. Shape: (number of episodes)
"""
# Batch data. For more details, check function header.
batch_obs = []
batch_acts = []
batch_log_probs = []
batch_rews = []
batch_rtgs = []
batch_lens = []
batch_infractions = []
# Episodic data. Keeps track of rewards per episode, will get cleared
# upon each new episode
ep_rews = []
t = 0 # Keeps track of how many timesteps we've run so far this batch
# Keep simulating until we've run more than or equal to specified timesteps per batch
while t < self.timesteps_per_batch:
act_list = []
ep_rews = [] # rewards collected per episode
			# Reset the environment. Note that obs is short for observation.
obs = self.env.reset()
#print(f'obs reset ============= {obs}')
done = False
count_infractions = 0
count_infractions_acc = 0
count_infractions_steer = 0
# Run an episode for a maximum of max_timesteps_per_episode timesteps
for ep_t in range(self.max_timesteps_per_episode):
a_predicted_clf = clf_control(self.env.v_ego)
delta, target_id, crosstrack_error = self.env.car.tracker.stanley_control(self.env.x_ego, self.env.y_ego, self.env.yaw_ego, self.env.v_ego, self.env.delta_ego)
# If render is specified, render the environment
if self.render:
self.env.render()
t += 1 # Increment timesteps ran this batch so far
# Track observations in this batch
batch_obs.append(obs)
# Calculate action and make a step in the env.
# Note that rew is short for reward.
if self.discrete:
action, log_prob = self.get_action_discrete(obs)
else:
action, log_prob = self.get_action(obs) #self.get_action_discrete(obs)
#print(f'action chosen =============== {action}')
if(abs(round(float(action[0]),1))<abs(round(float(a_predicted_clf),1))):
count_infractions_acc = count_infractions_acc+1
if(abs(round(float(action[1]),1)) < abs(round(float(delta),1))-0.2):
#print(f'After rounding =============== {round(float(action_net[1]),1)} ====== {round(float(action[1]),1)}')
count_infractions_steer = count_infractions_steer+1
obs, rew, done, info = self.env.step(action)
count_infractions = count_infractions_acc+count_infractions_steer
# Track recent reward, action, and action log probability
ep_rews.append(rew)
batch_acts.append(action)
batch_log_probs.append(log_prob)
act_list.append(info)
# If the environment tells us the episode is terminated, break
if done:
break
# Track episodic lengths and rewards
#self.env.render(act_list)
batch_lens.append(ep_t + 1)
batch_rews.append(ep_rews)
batch_infractions.append(count_infractions)
# Reshape data as tensors in the shape specified in function description, before returning
batch_obs = torch.tensor(batch_obs, dtype=torch.float)
#print(f'batch_acts =============== {batch_acts}')
#For discrete state space
if self.discrete:
batch_acts = torch.tensor(batch_acts, dtype=torch.long).view(-1,)
else:
batch_acts = torch.tensor(batch_acts, dtype=torch.float) #torch.tensor(batch_acts, dtype=torch.long).view(-1,)
#print(f'batch_acts =============== {batch_acts}')
batch_log_probs = torch.tensor(batch_log_probs, dtype=torch.float)
batch_rtgs = self.compute_rtgs(batch_rews) # ALG STEP 4
# Log the episodic returns and episodic lengths in this batch.
self.logger['batch_rews'] = batch_rews
self.logger['batch_lens'] = batch_lens
self.logger['batch_infractions'] = batch_infractions
return batch_obs, batch_acts, batch_log_probs, batch_rtgs, batch_lens
def compute_rtgs(self, batch_rews):
"""
Compute the Reward-To-Go of each timestep in a batch given the rewards.
Parameters:
batch_rews - the rewards in a batch, Shape: (number of episodes, number of timesteps per episode)
Return:
batch_rtgs - the rewards to go, Shape: (number of timesteps in batch)
"""
# The rewards-to-go (rtg) per episode per batch to return.
		# The shape will be (number of timesteps in batch)
batch_rtgs = []
# Iterate through each episode
for ep_rews in reversed(batch_rews):
discounted_reward = 0 # The discounted reward so far
# Iterate through all rewards in the episode. We go backwards for smoother calculation of each
# discounted return (think about why it would be harder starting from the beginning)
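			# Worked example (illustrative): with gamma = 0.9 and ep_rews = [1, 2, 3] the loop below
			# yields rewards-to-go [1 + 0.9*(2 + 0.9*3), 2 + 0.9*3, 3] = [5.23, 4.7, 3.0]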
for rew in reversed(ep_rews):
discounted_reward = rew + discounted_reward * self.gamma
batch_rtgs.insert(0, discounted_reward)
# Convert the rewards-to-go into a tensor
batch_rtgs = torch.tensor(batch_rtgs, dtype=torch.float)
return batch_rtgs
# Probability sampling for discrete actions
def get_action_discrete(self, obs):
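		# NOTE: the discrete actor is assumed to output a probability vector (e.g. via a softmax
		# head in FeedForwardActorNN), which directly parameterizes the Categorical distribution below.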
#print(f'obs ================== {obs}')
mean = self.actor(obs)
#print(f'mean ================== {mean}')
dist = Categorical(mean)
#print(f'dist ================== {dist}')
action = dist.sample()
log_prob = dist.log_prob(action)
#print(f'action ====== {action} ========= {log_prob}')
return action.detach().numpy().item(), log_prob.detach().item()
def get_action(self, obs):
"""
Queries an action from the actor network, should be called from rollout.
Parameters:
obs - the observation at the current timestep
Return:
action - the action to take, as a numpy array
log_prob - the log probability of the selected action in the distribution
"""
# Query the actor network for a mean action
mean = self.actor(obs)
# Create a distribution with the mean action and std from the covariance matrix above.
# For more information on how this distribution works, check out Andrew Ng's lecture on it:
# https://www.youtube.com/watch?v=JjB58InuTqM
dist = MultivariateNormal(mean, self.cov_mat)
# Sample an action from the distribution
action = dist.sample()
# Calculate the log probability for that action
log_prob = dist.log_prob(action)
# Return the sampled action and the log probability of that action in our distribution
return action.detach().numpy(), log_prob.detach()
def evaluate(self, batch_obs, batch_acts):
"""
Estimate the values of each observation, and the log probs of
each action in the most recent batch with the most recent
iteration of the actor network. Should be called from learn.
Parameters:
batch_obs - the observations from the most recently collected batch as a tensor.
Shape: (number of timesteps in batch, dimension of observation)
batch_acts - the actions from the most recently collected batch as a tensor.
Shape: (number of timesteps in batch, dimension of action)
Return:
V - the predicted values of batch_obs
log_probs - the log probabilities of the actions taken in batch_acts given batch_obs
"""
# Query critic network for a value V for each batch_obs. Shape of V should be same as batch_rtgs
V = self.critic(batch_obs).squeeze()
# Calculate the log probabilities of batch actions using most recent actor network.
# This segment of code is similar to that in get_action()
mean = self.actor(batch_obs)
if self.discrete:
dist = Categorical(mean)
else:
dist = MultivariateNormal(mean, self.cov_mat)
#For discrete actions
#dist = Categorical(mean)
log_probs = dist.log_prob(batch_acts)
# Return the value vector V of each observation in the batch
# and log probabilities log_probs of each action in the batch
return V, log_probs
def _init_hyperparameters(self, hyperparameters):
"""
Initialize default and custom values for hyperparameters
Parameters:
hyperparameters - the extra arguments included when creating the PPO model, should only include
hyperparameters defined below with custom values.
Return:
None
"""
# Initialize default values for hyperparameters
# Algorithm hyperparameters
self.timesteps_per_batch = 4800 # Number of timesteps to run per batch
self.max_timesteps_per_episode = 1600 # Max number of timesteps per episode
self.n_updates_per_iteration = 5 # Number of times to update actor/critic per iteration
self.lr = 0.005 # Learning rate of actor optimizer
self.gamma = 0.95 # Discount factor to be applied when calculating Rewards-To-Go
self.clip = 0.2 # Recommended 0.2, helps define the threshold to clip the ratio during SGA
# Miscellaneous parameters
self.render = False # If we should render during rollout
self.save_freq = 10 # How often we save in number of iterations
self.seed = None # Sets the seed of our program, used for reproducibility of results
self.discrete = False # Sets the type of environment to discrete or continuous
		self.training_step = 200 # Sets the number of training steps
		# Change any default values to custom values for specified hyperparameters
		for param, val in hyperparameters.items():
			setattr(self, param, val) # safer than exec()-based string assembly; also works for non-numeric values
		# Sets the seed if specified
		if self.seed is not None:
# Check if our seed is valid first
assert(type(self.seed) == int)
# Set the seed
torch.manual_seed(self.seed)
print(f"Successfully set seed to {self.seed}")
def _log_summary(self):
"""
Print to stdout what we've logged so far in the most recent batch.
Parameters:
None
Return:
None
"""
# Calculate logging values. I use a few python shortcuts to calculate each value
# without explaining since it's not too important to PPO; feel free to look it over,
# and if you have any questions you can email me (look at bottom of README)
t_so_far = self.logger['t_so_far']
i_so_far = self.logger['i_so_far']
avg_ep_lens = np.mean(self.logger['batch_lens'])
avg_ep_rews = np.mean([np.sum(ep_rews) for ep_rews in self.logger['batch_rews']])
avg_actor_loss = np.mean([losses.float().mean() for losses in self.logger['actor_losses']])
avg_ep_infractions = np.mean([np.sum(ep_inf) for ep_inf in self.logger['batch_infractions']])
actor_model = self.logger['actor_network']
# Round decimal places for more aesthetic logging messages
avg_ep_lens = str(round(avg_ep_lens, 2))
avg_ep_rews = str(round(avg_ep_rews, 2))
avg_ep_infractions = str(round(avg_ep_infractions, 2))
avg_actor_loss = str(round(avg_actor_loss, 5))
writer.add_scalar("Average Episodic Return", int(float(avg_ep_rews)), t_so_far)
writer.add_scalar("Average actor Loss", int(float(avg_actor_loss)), t_so_far)
writer.add_scalar("Average Infractions", int(float(avg_ep_infractions)), t_so_far)
# Tracking the weight of the network
for name, param in actor_model.named_parameters():
if 'weight' in name:
writer.add_histogram(name, param.detach().numpy(), t_so_far)
# Print logging statements
print(flush=True)
print(f"-------------------- Iteration #{i_so_far} --------------------", flush=True)
print(f"Average Episodic Length: {avg_ep_lens}", flush=True)
print(f"Average Episodic Return: {avg_ep_rews}", flush=True)
print(f"Average Episodic Infractions : {avg_ep_infractions}", flush=True)
print(f"Average Loss: {avg_actor_loss}", flush=True)
print(f"Timesteps So Far: {t_so_far}", flush=True)
print(f"------------------------------------------------------", flush=True)
print(flush=True)
# Reset batch-specific logging data
self.logger['batch_lens'] = []
self.logger['batch_rews'] = []
self.logger['actor_losses'] = []
def test(env, actor_model, is_discrete):
"""
Tests the model.
Parameters:
env - the environment to test the policy on
			actor_model - the actor model to load in
			is_discrete - whether the environment's action space is discrete
Return:
None
"""
print(f"Testing {actor_model}", flush=True)
# If the actor model is not specified, then exit
if actor_model == '':
print(f"Didn't specify model file. Exiting.", flush=True)
sys.exit(0)
# Extract out dimensions of observation and action spaces
obs_dim = env.observation_space.shape[0]
if is_discrete:
act_dim = env.action_space.n
else:
act_dim = env.action_space.shape[0] #env.action_space.n #env.action_space.shape[0]
# Build our policy the same way we build our actor model in PPO
policy = FeedForwardActorNN(obs_dim, act_dim,is_discrete)
# Load in the actor model saved by the PPO algorithm
policy.load_state_dict(torch.load(actor_model))
# Evaluate our policy with a separate module, eval_policy, to demonstrate
# that once we are done training the model/policy with ppo.py, we no longer need
# ppo.py since it only contains the training algorithm. The model/policy itself exists
# independently as a binary file that can be loaded in with torch.
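	# NOTE: eval_policy is not imported in this file; it is assumed to be provided by a companion
	# module (e.g. `from eval_policy import eval_policy`) before test() is called.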
eval_policy(policy=policy, env=env, render=True, is_discrete=is_discrete)
|
_init_hyperparameters
|
Initialize default and custom values for hyperparameters
Parameters:
hyperparameters - the extra arguments included when creating the PPO model, should only include
hyperparameters defined below with custom values.
Return:
None
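Example (illustrative sketch):
	Assuming `env` is a gym environment object, hyperparameters are supplied as keyword
	arguments when constructing the model, e.g. model = PPO(env, lr=3e-4, gamma=0.99);
	anything not passed keeps the default value set in the method body.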
|
"""
The file contains the PPO class to train with.
NOTE: All "ALG STEP"s are following the numbers from the original PPO pseudocode.
It can be found here: https://spinningup.openai.com/en/latest/_images/math/e62a8971472597f4b014c2da064f636ffe365ba3.svg
"""
import gym
import numpy as np
import torch
import torch.nn as nn
from torch.optim import Adam
#For continuous actions
from torch.distributions import MultivariateNormal
#For discrete action_space
from torch.distributions import Categorical
from network import FeedForwardActorNN, FeedForwardCriticNN
import sys
from cbf_clf_helper import clf_control, cbf_control
#Integrating tensorboard
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter()
class PPO:
"""
This is the PPO class we will use as our model in main.py
"""
def __init__(self, env, **hyperparameters):
"""
Initializes the PPO model, including hyperparameters.
Parameters:
env - the environment to train on.
hyperparameters - all extra arguments passed into PPO that should be hyperparameters.
Returns:
None
"""
# Make sure the environment is compatible with our code
assert(type(env.observation_space) == gym.spaces.Box)
		# assert(type(env.action_space) == gym.spaces.Box) # disabled: the action space may be Discrete or Box here
# Initialize hyperparameters for training with PPO
self._init_hyperparameters(hyperparameters)
# Extract environment information
self.env = env
self.obs_dim = env.observation_space.shape[0]
if self.discrete:
self.act_dim = env.action_space.n
else:
self.act_dim = env.action_space.shape[0] #env.action_space.n #env.action_space.shape[0]
# Initialize actor and critic networks
self.actor = FeedForwardActorNN(self.obs_dim, self.act_dim,self.discrete)
actor_model = 'ppo_actorKinematicBicycleGymLane.pth'
policy = FeedForwardActorNN(5, 2,False)
policy.load_state_dict(torch.load(actor_model))
actor_model = policy
#print(f'model =========== {self.actor}') # ALG STEP 1
self.critic = FeedForwardCriticNN(self.obs_dim, 1)
#print(f'critic =========== {self.critic}')
# Initialize optimizers for actor and critic
self.actor_optim = Adam(self.actor.parameters(), lr=self.lr)
self.critic_optim = Adam(self.critic.parameters(), lr=self.lr)
# Initialize the covariance matrix used to query the actor for actions
self.cov_var = torch.full(size=(self.act_dim,), fill_value=0.05)
self.cov_mat = torch.diag(self.cov_var)
self.obs_count = 0
self.index_count = 0
# This logger will help us with printing out summaries of each iteration
self.logger = {
't_so_far': 0, # timesteps so far
'i_so_far': 0, # iterations so far
'batch_lens': [], # episodic lengths in batch
'batch_rews': [], # episodic returns in batch
			'batch_infractions': [], # episodic infraction counts in batch
'actor_losses': [], # losses of actor network in current iteration
'actor_network' : 0, # Actor network
}
def learn(self, env_name,failure_observations,subpolicy):
"""
Train the actor and critic networks. Here is where the main PPO algorithm resides.
Parameters:
				env_name - name appended to the saved actor/critic checkpoint filenames
				failure_observations - passed through to rollout (not used by rollout in this version)
				subpolicy - if True, checkpoints are saved under the sub-policy filenames
Return:
None
"""
print(f"Learning... Running {self.max_timesteps_per_episode} timesteps per episode, ", end='')
print(f"{self.timesteps_per_batch} timesteps per batch for a total of {self.training_step} iterations")
t_so_far = 0 # Timesteps simulated so far
		i_so_far = 0 # Iterations run so far
while i_so_far < self.training_step: # ALG STEP 2
# Autobots, roll out (just kidding, we're collecting our batch simulations here)
batch_obs, batch_acts, batch_log_probs, batch_rtgs, batch_lens = self.rollout(subpolicy,failure_observations) # ALG STEP 3
# Calculate how many timesteps we collected this batch
t_so_far += np.sum(batch_lens)
# Increment the number of iterations
i_so_far += 1
# Logging timesteps so far and iterations so far
self.logger['t_so_far'] = t_so_far
self.logger['i_so_far'] = i_so_far
# Calculate advantage at k-th iteration
V, _ = self.evaluate(batch_obs, batch_acts)
A_k = batch_rtgs - V.detach() # ALG STEP 5
# One of the only tricks I use that isn't in the pseudocode. Normalizing advantages
# isn't theoretically necessary, but in practice it decreases the variance of
# our advantages and makes convergence much more stable and faster. I added this because
# solving some environments was too unstable without it.
A_k = (A_k - A_k.mean()) / (A_k.std() + 1e-10)
# This is the loop where we update our network for some n epochs
for _ in range(self.n_updates_per_iteration): # ALG STEP 6 & 7
# Calculate V_phi and pi_theta(a_t | s_t)
V, curr_log_probs = self.evaluate(batch_obs, batch_acts)
# Calculate the ratio pi_theta(a_t | s_t) / pi_theta_k(a_t | s_t)
# NOTE: we just subtract the logs, which is the same as
# dividing the values and then canceling the log with e^log.
# For why we use log probabilities instead of actual probabilities,
# here's a great explanation:
# https://cs.stackexchange.com/questions/70518/why-do-we-use-the-log-in-gradient-based-reinforcement-algorithms
# TL;DR makes gradient ascent easier behind the scenes.
ratios = torch.exp(curr_log_probs - batch_log_probs)
# Calculate surrogate losses.
#print(f'A_k======================={A_k}')
surr1 = ratios * A_k
#print(f'surr1======================={surr1}')
surr2 = torch.clamp(ratios, 1 - self.clip, 1 + self.clip) * A_k
#print(f'surr2======================={surr2}')
# Calculate actor and critic losses.
# NOTE: we take the negative min of the surrogate losses because we're trying to maximize
# the performance function, but Adam minimizes the loss. So minimizing the negative
# performance function maximizes it.
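				# In PPO-Clip notation: L^CLIP = E_t[ min(r_t * A_t, clip(r_t, 1 - eps, 1 + eps) * A_t) ]
				# with eps = self.clip, r_t = ratios and A_t = A_k; its negative mean is the loss minimized below.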
actor_loss = (-torch.min(surr1, surr2)).mean()
#print(f'actor_loss======================={actor_loss}')
critic_loss = nn.MSELoss()(V, batch_rtgs)
# Calculate gradients and perform backward propagation for actor network
self.actor_optim.zero_grad()
actor_loss.backward(retain_graph=True)
self.actor_optim.step()
# Calculate gradients and perform backward propagation for critic network
self.critic_optim.zero_grad()
critic_loss.backward()
self.critic_optim.step()
# Log actor loss
self.logger['actor_losses'].append(actor_loss.detach())
self.logger['actor_network'] = self.actor
# Print a summary of our training so far
self._log_summary()
# Save our model if it's time
if i_so_far % self.save_freq == 0:
if subpolicy:
torch.save(self.actor.state_dict(), './ppo_actor_subpolicy'+env_name+'.pth')
torch.save(self.critic.state_dict(), './ppo_critic_subpolicy'+env_name+'.pth')
else:
torch.save(self.actor.state_dict(), './ppo_actor'+env_name+'.pth')
torch.save(self.critic.state_dict(), './ppo_critic'+env_name+'.pth')
def rollout(self,subpolicy,failure_observations):
"""
This is where we collect the batch of data
from simulation. Since this is an on-policy algorithm, we'll need to collect a fresh batch
of data each time we iterate the actor/critic networks.
Parameters:
				subpolicy - passed in from learn (not used while collecting data in this version)
				failure_observations - passed in from learn (not used while collecting data in this version)
Return:
batch_obs - the observations collected this batch. Shape: (number of timesteps, dimension of observation)
batch_acts - the actions collected this batch. Shape: (number of timesteps, dimension of action)
batch_log_probs - the log probabilities of each action taken this batch. Shape: (number of timesteps)
batch_rtgs - the Rewards-To-Go of each timestep in this batch. Shape: (number of timesteps)
batch_lens - the lengths of each episode this batch. Shape: (number of episodes)
"""
# Batch data. For more details, check function header.
batch_obs = []
batch_acts = []
batch_log_probs = []
batch_rews = []
batch_rtgs = []
batch_lens = []
batch_infractions = []
# Episodic data. Keeps track of rewards per episode, will get cleared
# upon each new episode
ep_rews = []
t = 0 # Keeps track of how many timesteps we've run so far this batch
# Keep simulating until we've run more than or equal to specified timesteps per batch
while t < self.timesteps_per_batch:
act_list = []
ep_rews = [] # rewards collected per episode
			# Reset the environment. Note that obs is short for observation.
obs = self.env.reset()
#print(f'obs reset ============= {obs}')
done = False
count_infractions = 0
count_infractions_acc = 0
count_infractions_steer = 0
# Run an episode for a maximum of max_timesteps_per_episode timesteps
for ep_t in range(self.max_timesteps_per_episode):
a_predicted_clf = clf_control(self.env.v_ego)
delta, target_id, crosstrack_error = self.env.car.tracker.stanley_control(self.env.x_ego, self.env.y_ego, self.env.yaw_ego, self.env.v_ego, self.env.delta_ego)
# If render is specified, render the environment
if self.render:
self.env.render()
t += 1 # Increment timesteps ran this batch so far
# Track observations in this batch
batch_obs.append(obs)
# Calculate action and make a step in the env.
# Note that rew is short for reward.
if self.discrete:
action, log_prob = self.get_action_discrete(obs)
else:
action, log_prob = self.get_action(obs) #self.get_action_discrete(obs)
#print(f'action chosen =============== {action}')
if(abs(round(float(action[0]),1))<abs(round(float(a_predicted_clf),1))):
count_infractions_acc = count_infractions_acc+1
if(abs(round(float(action[1]),1)) < abs(round(float(delta),1))-0.2):
#print(f'After rounding =============== {round(float(action_net[1]),1)} ====== {round(float(action[1]),1)}')
count_infractions_steer = count_infractions_steer+1
obs, rew, done, info = self.env.step(action)
count_infractions = count_infractions_acc+count_infractions_steer
# Track recent reward, action, and action log probability
ep_rews.append(rew)
batch_acts.append(action)
batch_log_probs.append(log_prob)
act_list.append(info)
# If the environment tells us the episode is terminated, break
if done:
break
# Track episodic lengths and rewards
#self.env.render(act_list)
batch_lens.append(ep_t + 1)
batch_rews.append(ep_rews)
batch_infractions.append(count_infractions)
# Reshape data as tensors in the shape specified in function description, before returning
batch_obs = torch.tensor(batch_obs, dtype=torch.float)
#print(f'batch_acts =============== {batch_acts}')
#For discrete state space
if self.discrete:
batch_acts = torch.tensor(batch_acts, dtype=torch.long).view(-1,)
else:
batch_acts = torch.tensor(batch_acts, dtype=torch.float) #torch.tensor(batch_acts, dtype=torch.long).view(-1,)
#print(f'batch_acts =============== {batch_acts}')
batch_log_probs = torch.tensor(batch_log_probs, dtype=torch.float)
batch_rtgs = self.compute_rtgs(batch_rews) # ALG STEP 4
# Log the episodic returns and episodic lengths in this batch.
self.logger['batch_rews'] = batch_rews
self.logger['batch_lens'] = batch_lens
self.logger['batch_infractions'] = batch_infractions
return batch_obs, batch_acts, batch_log_probs, batch_rtgs, batch_lens
def compute_rtgs(self, batch_rews):
"""
Compute the Reward-To-Go of each timestep in a batch given the rewards.
Parameters:
batch_rews - the rewards in a batch, Shape: (number of episodes, number of timesteps per episode)
Return:
batch_rtgs - the rewards to go, Shape: (number of timesteps in batch)
"""
# The rewards-to-go (rtg) per episode per batch to return.
		# The shape will be (number of timesteps in batch)
batch_rtgs = []
# Iterate through each episode
for ep_rews in reversed(batch_rews):
discounted_reward = 0 # The discounted reward so far
# Iterate through all rewards in the episode. We go backwards for smoother calculation of each
# discounted return (think about why it would be harder starting from the beginning)
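			# Worked example (illustrative): with gamma = 0.9 and ep_rews = [1, 2, 3] the loop below
			# yields rewards-to-go [1 + 0.9*(2 + 0.9*3), 2 + 0.9*3, 3] = [5.23, 4.7, 3.0]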
for rew in reversed(ep_rews):
discounted_reward = rew + discounted_reward * self.gamma
batch_rtgs.insert(0, discounted_reward)
# Convert the rewards-to-go into a tensor
batch_rtgs = torch.tensor(batch_rtgs, dtype=torch.float)
return batch_rtgs
# Probability sampling for discrete actions
def get_action_discrete(self, obs):
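		# NOTE: the discrete actor is assumed to output a probability vector (e.g. via a softmax
		# head in FeedForwardActorNN), which directly parameterizes the Categorical distribution below.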
#print(f'obs ================== {obs}')
mean = self.actor(obs)
#print(f'mean ================== {mean}')
dist = Categorical(mean)
#print(f'dist ================== {dist}')
action = dist.sample()
log_prob = dist.log_prob(action)
#print(f'action ====== {action} ========= {log_prob}')
return action.detach().numpy().item(), log_prob.detach().item()
def get_action(self, obs):
"""
Queries an action from the actor network, should be called from rollout.
Parameters:
obs - the observation at the current timestep
Return:
action - the action to take, as a numpy array
log_prob - the log probability of the selected action in the distribution
"""
# Query the actor network for a mean action
mean = self.actor(obs)
# Create a distribution with the mean action and std from the covariance matrix above.
# For more information on how this distribution works, check out Andrew Ng's lecture on it:
# https://www.youtube.com/watch?v=JjB58InuTqM
dist = MultivariateNormal(mean, self.cov_mat)
# Sample an action from the distribution
action = dist.sample()
# Calculate the log probability for that action
log_prob = dist.log_prob(action)
# Return the sampled action and the log probability of that action in our distribution
return action.detach().numpy(), log_prob.detach()
def evaluate(self, batch_obs, batch_acts):
"""
Estimate the values of each observation, and the log probs of
each action in the most recent batch with the most recent
iteration of the actor network. Should be called from learn.
Parameters:
batch_obs - the observations from the most recently collected batch as a tensor.
Shape: (number of timesteps in batch, dimension of observation)
batch_acts - the actions from the most recently collected batch as a tensor.
Shape: (number of timesteps in batch, dimension of action)
Return:
V - the predicted values of batch_obs
log_probs - the log probabilities of the actions taken in batch_acts given batch_obs
"""
# Query critic network for a value V for each batch_obs. Shape of V should be same as batch_rtgs
V = self.critic(batch_obs).squeeze()
# Calculate the log probabilities of batch actions using most recent actor network.
# This segment of code is similar to that in get_action()
mean = self.actor(batch_obs)
if self.discrete:
dist = Categorical(mean)
else:
dist = MultivariateNormal(mean, self.cov_mat)
#For discrete actions
#dist = Categorical(mean)
log_probs = dist.log_prob(batch_acts)
# Return the value vector V of each observation in the batch
# and log probabilities log_probs of each action in the batch
return V, log_probs
# MASKED: _init_hyperparameters function (lines 399-437)
def _log_summary(self):
"""
Print to stdout what we've logged so far in the most recent batch.
Parameters:
None
Return:
None
"""
# Calculate logging values. I use a few python shortcuts to calculate each value
# without explaining since it's not too important to PPO; feel free to look it over,
# and if you have any questions you can email me (look at bottom of README)
t_so_far = self.logger['t_so_far']
i_so_far = self.logger['i_so_far']
avg_ep_lens = np.mean(self.logger['batch_lens'])
avg_ep_rews = np.mean([np.sum(ep_rews) for ep_rews in self.logger['batch_rews']])
avg_actor_loss = np.mean([losses.float().mean() for losses in self.logger['actor_losses']])
avg_ep_infractions = np.mean([np.sum(ep_inf) for ep_inf in self.logger['batch_infractions']])
actor_model = self.logger['actor_network']
# Round decimal places for more aesthetic logging messages
avg_ep_lens = str(round(avg_ep_lens, 2))
avg_ep_rews = str(round(avg_ep_rews, 2))
avg_ep_infractions = str(round(avg_ep_infractions, 2))
avg_actor_loss = str(round(avg_actor_loss, 5))
writer.add_scalar("Average Episodic Return", int(float(avg_ep_rews)), t_so_far)
writer.add_scalar("Average actor Loss", int(float(avg_actor_loss)), t_so_far)
writer.add_scalar("Average Infractions", int(float(avg_ep_infractions)), t_so_far)
# Tracking the weight of the network
for name, param in actor_model.named_parameters():
if 'weight' in name:
writer.add_histogram(name, param.detach().numpy(), t_so_far)
# Print logging statements
print(flush=True)
print(f"-------------------- Iteration #{i_so_far} --------------------", flush=True)
print(f"Average Episodic Length: {avg_ep_lens}", flush=True)
print(f"Average Episodic Return: {avg_ep_rews}", flush=True)
print(f"Average Episodic Infractions : {avg_ep_infractions}", flush=True)
print(f"Average Loss: {avg_actor_loss}", flush=True)
print(f"Timesteps So Far: {t_so_far}", flush=True)
print(f"------------------------------------------------------", flush=True)
print(flush=True)
# Reset batch-specific logging data
self.logger['batch_lens'] = []
self.logger['batch_rews'] = []
self.logger['actor_losses'] = []
def test(env, actor_model, is_discrete):
"""
Tests the model.
Parameters:
env - the environment to test the policy on
			actor_model - the actor model to load in
			is_discrete - whether the environment's action space is discrete
Return:
None
"""
print(f"Testing {actor_model}", flush=True)
# If the actor model is not specified, then exit
if actor_model == '':
print(f"Didn't specify model file. Exiting.", flush=True)
sys.exit(0)
# Extract out dimensions of observation and action spaces
obs_dim = env.observation_space.shape[0]
if is_discrete:
act_dim = env.action_space.n
else:
act_dim = env.action_space.shape[0] #env.action_space.n #env.action_space.shape[0]
# Build our policy the same way we build our actor model in PPO
policy = FeedForwardActorNN(obs_dim, act_dim,is_discrete)
# Load in the actor model saved by the PPO algorithm
policy.load_state_dict(torch.load(actor_model))
# Evaluate our policy with a separate module, eval_policy, to demonstrate
# that once we are done training the model/policy with ppo.py, we no longer need
# ppo.py since it only contains the training algorithm. The model/policy itself exists
# independently as a binary file that can be loaded in with torch.
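	# NOTE: eval_policy is not imported in this file; it is assumed to be provided by a companion
	# module (e.g. `from eval_policy import eval_policy`) before test() is called.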
eval_policy(policy=policy, env=env, render=True, is_discrete=is_discrete)
|
def _init_hyperparameters(self, hyperparameters):
"""
Initialize default and custom values for hyperparameters
Parameters:
hyperparameters - the extra arguments included when creating the PPO model, should only include
hyperparameters defined below with custom values.
Return:
None
"""
# Initialize default values for hyperparameters
# Algorithm hyperparameters
self.timesteps_per_batch = 4800 # Number of timesteps to run per batch
self.max_timesteps_per_episode = 1600 # Max number of timesteps per episode
self.n_updates_per_iteration = 5 # Number of times to update actor/critic per iteration
self.lr = 0.005 # Learning rate of actor optimizer
self.gamma = 0.95 # Discount factor to be applied when calculating Rewards-To-Go
self.clip = 0.2 # Recommended 0.2, helps define the threshold to clip the ratio during SGA
# Miscellaneous parameters
self.render = False # If we should render during rollout
self.save_freq = 10 # How often we save in number of iterations
self.seed = None # Sets the seed of our program, used for reproducibility of results
self.discrete = False # Sets the type of environment to discrete or continuous
		self.training_step = 200 # Sets the number of training steps
		# Change any default values to custom values for specified hyperparameters
		for param, val in hyperparameters.items():
			setattr(self, param, val) # safer than exec()-based string assembly; also works for non-numeric values
		# Sets the seed if specified
		if self.seed is not None:
# Check if our seed is valid first
assert(type(self.seed) == int)
# Set the seed
torch.manual_seed(self.seed)
print(f"Successfully set seed to {self.seed}")
| 399 | 437 |
"""
The file contains the PPO class to train with.
NOTE: All "ALG STEP"s are following the numbers from the original PPO pseudocode.
It can be found here: https://spinningup.openai.com/en/latest/_images/math/e62a8971472597f4b014c2da064f636ffe365ba3.svg
"""
import gym
import numpy as np
import torch
import torch.nn as nn
from torch.optim import Adam
#For continuous actions
from torch.distributions import MultivariateNormal
#For discrete action_space
from torch.distributions import Categorical
from network import FeedForwardActorNN, FeedForwardCriticNN
import sys
from cbf_clf_helper import clf_control, cbf_control
#Integrating tensorboard
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter()
class PPO:
"""
This is the PPO class we will use as our model in main.py
"""
def __init__(self, env, **hyperparameters):
"""
Initializes the PPO model, including hyperparameters.
Parameters:
env - the environment to train on.
hyperparameters - all extra arguments passed into PPO that should be hyperparameters.
Returns:
None
"""
# Make sure the environment is compatible with our code
assert(type(env.observation_space) == gym.spaces.Box)
		# assert(type(env.action_space) == gym.spaces.Box) # disabled: the action space may be Discrete or Box here
# Initialize hyperparameters for training with PPO
self._init_hyperparameters(hyperparameters)
# Extract environment information
self.env = env
self.obs_dim = env.observation_space.shape[0]
if self.discrete:
self.act_dim = env.action_space.n
else:
self.act_dim = env.action_space.shape[0] #env.action_space.n #env.action_space.shape[0]
# Initialize actor and critic networks
self.actor = FeedForwardActorNN(self.obs_dim, self.act_dim,self.discrete)
actor_model = 'ppo_actorKinematicBicycleGymLane.pth'
policy = FeedForwardActorNN(5, 2,False)
policy.load_state_dict(torch.load(actor_model))
actor_model = policy
#print(f'model =========== {self.actor}') # ALG STEP 1
self.critic = FeedForwardCriticNN(self.obs_dim, 1)
#print(f'critic =========== {self.critic}')
# Initialize optimizers for actor and critic
self.actor_optim = Adam(self.actor.parameters(), lr=self.lr)
self.critic_optim = Adam(self.critic.parameters(), lr=self.lr)
# Initialize the covariance matrix used to query the actor for actions
self.cov_var = torch.full(size=(self.act_dim,), fill_value=0.05)
self.cov_mat = torch.diag(self.cov_var)
self.obs_count = 0
self.index_count = 0
# This logger will help us with printing out summaries of each iteration
self.logger = {
't_so_far': 0, # timesteps so far
'i_so_far': 0, # iterations so far
'batch_lens': [], # episodic lengths in batch
'batch_rews': [], # episodic returns in batch
			'batch_infractions': [], # episodic infraction counts in batch
'actor_losses': [], # losses of actor network in current iteration
'actor_network' : 0, # Actor network
}
def learn(self, env_name,failure_observations,subpolicy):
"""
Train the actor and critic networks. Here is where the main PPO algorithm resides.
Parameters:
				env_name - name appended to the saved actor/critic checkpoint filenames
				failure_observations - passed through to rollout (not used by rollout in this version)
				subpolicy - if True, checkpoints are saved under the sub-policy filenames
Return:
None
"""
print(f"Learning... Running {self.max_timesteps_per_episode} timesteps per episode, ", end='')
print(f"{self.timesteps_per_batch} timesteps per batch for a total of {self.training_step} iterations")
t_so_far = 0 # Timesteps simulated so far
		i_so_far = 0 # Iterations run so far
while i_so_far < self.training_step: # ALG STEP 2
# Autobots, roll out (just kidding, we're collecting our batch simulations here)
batch_obs, batch_acts, batch_log_probs, batch_rtgs, batch_lens = self.rollout(subpolicy,failure_observations) # ALG STEP 3
# Calculate how many timesteps we collected this batch
t_so_far += np.sum(batch_lens)
# Increment the number of iterations
i_so_far += 1
# Logging timesteps so far and iterations so far
self.logger['t_so_far'] = t_so_far
self.logger['i_so_far'] = i_so_far
# Calculate advantage at k-th iteration
V, _ = self.evaluate(batch_obs, batch_acts)
A_k = batch_rtgs - V.detach() # ALG STEP 5
# One of the only tricks I use that isn't in the pseudocode. Normalizing advantages
# isn't theoretically necessary, but in practice it decreases the variance of
# our advantages and makes convergence much more stable and faster. I added this because
# solving some environments was too unstable without it.
A_k = (A_k - A_k.mean()) / (A_k.std() + 1e-10)
# This is the loop where we update our network for some n epochs
for _ in range(self.n_updates_per_iteration): # ALG STEP 6 & 7
# Calculate V_phi and pi_theta(a_t | s_t)
V, curr_log_probs = self.evaluate(batch_obs, batch_acts)
# Calculate the ratio pi_theta(a_t | s_t) / pi_theta_k(a_t | s_t)
# NOTE: we just subtract the logs, which is the same as
# dividing the values and then canceling the log with e^log.
# For why we use log probabilities instead of actual probabilities,
# here's a great explanation:
# https://cs.stackexchange.com/questions/70518/why-do-we-use-the-log-in-gradient-based-reinforcement-algorithms
# TL;DR makes gradient ascent easier behind the scenes.
ratios = torch.exp(curr_log_probs - batch_log_probs)
# Calculate surrogate losses.
#print(f'A_k======================={A_k}')
surr1 = ratios * A_k
#print(f'surr1======================={surr1}')
surr2 = torch.clamp(ratios, 1 - self.clip, 1 + self.clip) * A_k
#print(f'surr2======================={surr2}')
# Calculate actor and critic losses.
# NOTE: we take the negative min of the surrogate losses because we're trying to maximize
# the performance function, but Adam minimizes the loss. So minimizing the negative
# performance function maximizes it.
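				# In PPO-Clip notation: L^CLIP = E_t[ min(r_t * A_t, clip(r_t, 1 - eps, 1 + eps) * A_t) ]
				# with eps = self.clip, r_t = ratios and A_t = A_k; its negative mean is the loss minimized below.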
actor_loss = (-torch.min(surr1, surr2)).mean()
#print(f'actor_loss======================={actor_loss}')
critic_loss = nn.MSELoss()(V, batch_rtgs)
# Calculate gradients and perform backward propagation for actor network
self.actor_optim.zero_grad()
actor_loss.backward(retain_graph=True)
self.actor_optim.step()
# Calculate gradients and perform backward propagation for critic network
self.critic_optim.zero_grad()
critic_loss.backward()
self.critic_optim.step()
# Log actor loss
self.logger['actor_losses'].append(actor_loss.detach())
self.logger['actor_network'] = self.actor
# Print a summary of our training so far
self._log_summary()
# Save our model if it's time
if i_so_far % self.save_freq == 0:
if subpolicy:
torch.save(self.actor.state_dict(), './ppo_actor_subpolicy'+env_name+'.pth')
torch.save(self.critic.state_dict(), './ppo_critic_subpolicy'+env_name+'.pth')
else:
torch.save(self.actor.state_dict(), './ppo_actor'+env_name+'.pth')
torch.save(self.critic.state_dict(), './ppo_critic'+env_name+'.pth')
def rollout(self,subpolicy,failure_observations):
"""
This is where we collect the batch of data
from simulation. Since this is an on-policy algorithm, we'll need to collect a fresh batch
of data each time we iterate the actor/critic networks.
Parameters:
				subpolicy - passed in from learn (not used while collecting data in this version)
				failure_observations - passed in from learn (not used while collecting data in this version)
Return:
batch_obs - the observations collected this batch. Shape: (number of timesteps, dimension of observation)
batch_acts - the actions collected this batch. Shape: (number of timesteps, dimension of action)
batch_log_probs - the log probabilities of each action taken this batch. Shape: (number of timesteps)
batch_rtgs - the Rewards-To-Go of each timestep in this batch. Shape: (number of timesteps)
batch_lens - the lengths of each episode this batch. Shape: (number of episodes)
"""
# Batch data. For more details, check function header.
batch_obs = []
batch_acts = []
batch_log_probs = []
batch_rews = []
batch_rtgs = []
batch_lens = []
batch_infractions = []
# Episodic data. Keeps track of rewards per episode, will get cleared
# upon each new episode
ep_rews = []
t = 0 # Keeps track of how many timesteps we've run so far this batch
# Keep simulating until we've run more than or equal to specified timesteps per batch
while t < self.timesteps_per_batch:
act_list = []
ep_rews = [] # rewards collected per episode
			# Reset the environment. Note that obs is short for observation.
obs = self.env.reset()
#print(f'obs reset ============= {obs}')
done = False
count_infractions = 0
count_infractions_acc = 0
count_infractions_steer = 0
# Run an episode for a maximum of max_timesteps_per_episode timesteps
for ep_t in range(self.max_timesteps_per_episode):
a_predicted_clf = clf_control(self.env.v_ego)
delta, target_id, crosstrack_error = self.env.car.tracker.stanley_control(self.env.x_ego, self.env.y_ego, self.env.yaw_ego, self.env.v_ego, self.env.delta_ego)
# If render is specified, render the environment
if self.render:
self.env.render()
t += 1 # Increment timesteps ran this batch so far
# Track observations in this batch
batch_obs.append(obs)
# Calculate action and make a step in the env.
# Note that rew is short for reward.
if self.discrete:
action, log_prob = self.get_action_discrete(obs)
else:
action, log_prob = self.get_action(obs) #self.get_action_discrete(obs)
#print(f'action chosen =============== {action}')
if(abs(round(float(action[0]),1))<abs(round(float(a_predicted_clf),1))):
count_infractions_acc = count_infractions_acc+1
if(abs(round(float(action[1]),1)) < abs(round(float(delta),1))-0.2):
#print(f'After rounding =============== {round(float(action_net[1]),1)} ====== {round(float(action[1]),1)}')
count_infractions_steer = count_infractions_steer+1
obs, rew, done, info = self.env.step(action)
count_infractions = count_infractions_acc+count_infractions_steer
# Track recent reward, action, and action log probability
ep_rews.append(rew)
batch_acts.append(action)
batch_log_probs.append(log_prob)
act_list.append(info)
# If the environment tells us the episode is terminated, break
if done:
break
# Track episodic lengths and rewards
#self.env.render(act_list)
batch_lens.append(ep_t + 1)
batch_rews.append(ep_rews)
batch_infractions.append(count_infractions)
# Reshape data as tensors in the shape specified in function description, before returning
batch_obs = torch.tensor(batch_obs, dtype=torch.float)
#print(f'batch_acts =============== {batch_acts}')
#For discrete state space
if self.discrete:
batch_acts = torch.tensor(batch_acts, dtype=torch.long).view(-1,)
else:
batch_acts = torch.tensor(batch_acts, dtype=torch.float) #torch.tensor(batch_acts, dtype=torch.long).view(-1,)
#print(f'batch_acts =============== {batch_acts}')
batch_log_probs = torch.tensor(batch_log_probs, dtype=torch.float)
batch_rtgs = self.compute_rtgs(batch_rews) # ALG STEP 4
# Log the episodic returns and episodic lengths in this batch.
self.logger['batch_rews'] = batch_rews
self.logger['batch_lens'] = batch_lens
self.logger['batch_infractions'] = batch_infractions
return batch_obs, batch_acts, batch_log_probs, batch_rtgs, batch_lens
def compute_rtgs(self, batch_rews):
"""
Compute the Reward-To-Go of each timestep in a batch given the rewards.
Parameters:
batch_rews - the rewards in a batch, Shape: (number of episodes, number of timesteps per episode)
Return:
batch_rtgs - the rewards to go, Shape: (number of timesteps in batch)
"""
# The rewards-to-go (rtg) per episode per batch to return.
		# The shape will be (number of timesteps in batch)
batch_rtgs = []
# Iterate through each episode
for ep_rews in reversed(batch_rews):
discounted_reward = 0 # The discounted reward so far
# Iterate through all rewards in the episode. We go backwards for smoother calculation of each
# discounted return (think about why it would be harder starting from the beginning)
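			# Worked example (illustrative): with gamma = 0.9 and ep_rews = [1, 2, 3] the loop below
			# yields rewards-to-go [1 + 0.9*(2 + 0.9*3), 2 + 0.9*3, 3] = [5.23, 4.7, 3.0]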
for rew in reversed(ep_rews):
discounted_reward = rew + discounted_reward * self.gamma
batch_rtgs.insert(0, discounted_reward)
# Convert the rewards-to-go into a tensor
batch_rtgs = torch.tensor(batch_rtgs, dtype=torch.float)
return batch_rtgs
# Probability sampling for discrete actions
def get_action_discrete(self, obs):
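		# NOTE: the discrete actor is assumed to output a probability vector (e.g. via a softmax
		# head in FeedForwardActorNN), which directly parameterizes the Categorical distribution below.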
#print(f'obs ================== {obs}')
mean = self.actor(obs)
#print(f'mean ================== {mean}')
dist = Categorical(mean)
#print(f'dist ================== {dist}')
action = dist.sample()
log_prob = dist.log_prob(action)
#print(f'action ====== {action} ========= {log_prob}')
return action.detach().numpy().item(), log_prob.detach().item()
def get_action(self, obs):
"""
Queries an action from the actor network, should be called from rollout.
Parameters:
obs - the observation at the current timestep
Return:
action - the action to take, as a numpy array
log_prob - the log probability of the selected action in the distribution
"""
# Query the actor network for a mean action
mean = self.actor(obs)
# Create a distribution with the mean action and std from the covariance matrix above.
# For more information on how this distribution works, check out Andrew Ng's lecture on it:
# https://www.youtube.com/watch?v=JjB58InuTqM
dist = MultivariateNormal(mean, self.cov_mat)
# Sample an action from the distribution
action = dist.sample()
# Calculate the log probability for that action
log_prob = dist.log_prob(action)
# Return the sampled action and the log probability of that action in our distribution
return action.detach().numpy(), log_prob.detach()
def evaluate(self, batch_obs, batch_acts):
"""
Estimate the values of each observation, and the log probs of
each action in the most recent batch with the most recent
iteration of the actor network. Should be called from learn.
Parameters:
batch_obs - the observations from the most recently collected batch as a tensor.
Shape: (number of timesteps in batch, dimension of observation)
batch_acts - the actions from the most recently collected batch as a tensor.
Shape: (number of timesteps in batch, dimension of action)
Return:
V - the predicted values of batch_obs
log_probs - the log probabilities of the actions taken in batch_acts given batch_obs
"""
# Query critic network for a value V for each batch_obs. Shape of V should be same as batch_rtgs
V = self.critic(batch_obs).squeeze()
# Calculate the log probabilities of batch actions using most recent actor network.
# This segment of code is similar to that in get_action()
mean = self.actor(batch_obs)
if self.discrete:
dist = Categorical(mean)
else:
dist = MultivariateNormal(mean, self.cov_mat)
#For discrete actions
#dist = Categorical(mean)
log_probs = dist.log_prob(batch_acts)
# Return the value vector V of each observation in the batch
# and log probabilities log_probs of each action in the batch
return V, log_probs
def _init_hyperparameters(self, hyperparameters):
"""
Initialize default and custom values for hyperparameters
Parameters:
hyperparameters - the extra arguments included when creating the PPO model, should only include
hyperparameters defined below with custom values.
Return:
None
"""
# Initialize default values for hyperparameters
# Algorithm hyperparameters
self.timesteps_per_batch = 4800 # Number of timesteps to run per batch
self.max_timesteps_per_episode = 1600 # Max number of timesteps per episode
self.n_updates_per_iteration = 5 # Number of times to update actor/critic per iteration
self.lr = 0.005 # Learning rate of actor optimizer
self.gamma = 0.95 # Discount factor to be applied when calculating Rewards-To-Go
self.clip = 0.2 # Recommended 0.2, helps define the threshold to clip the ratio during SGA
# Miscellaneous parameters
self.render = False # If we should render during rollout
self.save_freq = 10 # How often we save in number of iterations
self.seed = None # Sets the seed of our program, used for reproducibility of results
self.discrete = False # Sets the type of environment to discrete or continuous
self.training_step = 200 # Sets the number of training iterations
# Change any default values to custom values for specified hyperparameters
for param, val in hyperparameters.items():
setattr(self, param, val)  # works for strings and other non-numeric values as well
# Sets the seed if specified
if self.seed is not None:
# Check if our seed is valid first
assert(type(self.seed) == int)
# Set the seed
torch.manual_seed(self.seed)
print(f"Successfully set seed to {self.seed}")
def _log_summary(self):
"""
Print to stdout what we've logged so far in the most recent batch.
Parameters:
None
Return:
None
"""
# Calculate logging values. I use a few python shortcuts to calculate each value
# without explaining since it's not too important to PPO; feel free to look it over,
# and if you have any questions you can email me (look at bottom of README)
t_so_far = self.logger['t_so_far']
i_so_far = self.logger['i_so_far']
avg_ep_lens = np.mean(self.logger['batch_lens'])
avg_ep_rews = np.mean([np.sum(ep_rews) for ep_rews in self.logger['batch_rews']])
avg_actor_loss = np.mean([losses.float().mean() for losses in self.logger['actor_losses']])
avg_ep_infractions = np.mean([np.sum(ep_inf) for ep_inf in self.logger['batch_infractions']])
actor_model = self.logger['actor_network']
# Round decimal places for more aesthetic logging messages
avg_ep_lens = str(round(avg_ep_lens, 2))
avg_ep_rews = str(round(avg_ep_rews, 2))
avg_ep_infractions = str(round(avg_ep_infractions, 2))
avg_actor_loss = str(round(avg_actor_loss, 5))
writer.add_scalar("Average Episodic Return", int(float(avg_ep_rews)), t_so_far)
writer.add_scalar("Average actor Loss", int(float(avg_actor_loss)), t_so_far)
writer.add_scalar("Average Infractions", int(float(avg_ep_infractions)), t_so_far)
# Tracking the weight of the network
for name, param in actor_model.named_parameters():
if 'weight' in name:
writer.add_histogram(name, param.detach().numpy(), t_so_far)
# Print logging statements
print(flush=True)
print(f"-------------------- Iteration #{i_so_far} --------------------", flush=True)
print(f"Average Episodic Length: {avg_ep_lens}", flush=True)
print(f"Average Episodic Return: {avg_ep_rews}", flush=True)
print(f"Average Episodic Infractions : {avg_ep_infractions}", flush=True)
print(f"Average Loss: {avg_actor_loss}", flush=True)
print(f"Timesteps So Far: {t_so_far}", flush=True)
print(f"------------------------------------------------------", flush=True)
print(flush=True)
# Reset batch-specific logging data
self.logger['batch_lens'] = []
self.logger['batch_rews'] = []
self.logger['actor_losses'] = []
def test(env, actor_model, is_discrete):
"""
Tests the model.
Parameters:
env - the environment to test the policy on
actor_model - the actor model to load in
is_discrete - whether the environment's action space is discrete
Return:
None
"""
print(f"Testing {actor_model}", flush=True)
# If the actor model is not specified, then exit
if actor_model == '':
print(f"Didn't specify model file. Exiting.", flush=True)
sys.exit(0)
# Extract out dimensions of observation and action spaces
obs_dim = env.observation_space.shape[0]
if is_discrete:
act_dim = env.action_space.n
else:
act_dim = env.action_space.shape[0] #env.action_space.n #env.action_space.shape[0]
# Build our policy the same way we build our actor model in PPO
policy = FeedForwardActorNN(obs_dim, act_dim,is_discrete)
# Load in the actor model saved by the PPO algorithm
policy.load_state_dict(torch.load(actor_model))
# Evaluate our policy with a separate module, eval_policy, to demonstrate
# that once we are done training the model/policy with ppo.py, we no longer need
# ppo.py since it only contains the training algorithm. The model/policy itself exists
# independently as a binary file that can be loaded in with torch.
eval_policy(policy=policy, env=env, render=True, is_discrete=is_discrete)
|
_log_summary
|
Print to stdout what we've logged so far in the most recent batch.
Parameters:
None
Return:
None
|
"""
The file contains the PPO class to train with.
NOTE: All "ALG STEP"s are following the numbers from the original PPO pseudocode.
It can be found here: https://spinningup.openai.com/en/latest/_images/math/e62a8971472597f4b014c2da064f636ffe365ba3.svg
"""
import gym
import numpy as np
import torch
import torch.nn as nn
from torch.optim import Adam
#For continuous actions
from torch.distributions import MultivariateNormal
#For discrete action_space
from torch.distributions import Categorical
from network import FeedForwardActorNN, FeedForwardCriticNN
import sys
from cbf_clf_helper import clf_control, cbf_control
#Integrating tensorboard
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter()
class PPO:
"""
This is the PPO class we will use as our model in main.py
"""
def __init__(self, env, **hyperparameters):
"""
Initializes the PPO model, including hyperparameters.
Parameters:
env - the environment to train on.
hyperparameters - all extra arguments passed into PPO that should be hyperparameters.
Returns:
None
"""
# Make sure the environment is compatible with our code
assert(type(env.observation_space) == gym.spaces.Box)
#assert(type(env.action_space) == gym.spaces.Box)  # commented out: discrete action spaces are also supported
# Initialize hyperparameters for training with PPO
self._init_hyperparameters(hyperparameters)
# Extract environment information
self.env = env
self.obs_dim = env.observation_space.shape[0]
if self.discrete:
self.act_dim = env.action_space.n
else:
self.act_dim = env.action_space.shape[0] #env.action_space.n #env.action_space.shape[0]
# Initialize actor and critic networks
self.actor = FeedForwardActorNN(self.obs_dim, self.act_dim,self.discrete)
actor_model = 'ppo_actorKinematicBicycleGymLane.pth'
policy = FeedForwardActorNN(5, 2,False)
policy.load_state_dict(torch.load(actor_model))
actor_model = policy
#print(f'model =========== {self.actor}') # ALG STEP 1
self.critic = FeedForwardCriticNN(self.obs_dim, 1)
#print(f'critic =========== {self.critic}')
# Initialize optimizers for actor and critic
self.actor_optim = Adam(self.actor.parameters(), lr=self.lr)
self.critic_optim = Adam(self.critic.parameters(), lr=self.lr)
# Initialize the covariance matrix used to query the actor for actions
self.cov_var = torch.full(size=(self.act_dim,), fill_value=0.05)
self.cov_mat = torch.diag(self.cov_var)
self.obs_count = 0
self.index_count = 0
# This logger will help us with printing out summaries of each iteration
self.logger = {
't_so_far': 0, # timesteps so far
'i_so_far': 0, # iterations so far
'batch_lens': [], # episodic lengths in batch
'batch_rews': [], # episodic returns in batch
'batch_infractions': [], # episodic infraction counts in batch
'actor_losses': [], # losses of actor network in current iteration
'actor_network' : 0, # Actor network
}
def learn(self, env_name,failure_observations,subpolicy):
"""
Train the actor and critic networks. Here is where the main PPO algorithm resides.
Parameters:
env_name - name suffix used for the saved actor/critic checkpoint files
failure_observations - passed through to rollout (not used by the rollout code shown here)
subpolicy - if True, checkpoints are saved under the '_subpolicy' file names
Return:
None
"""
print(f"Learning... Running {self.max_timesteps_per_episode} timesteps per episode, ", end='')
print(f"{self.timesteps_per_batch} timesteps per batch for a total of {self.training_step} iterations")
t_so_far = 0 # Timesteps simulated so far
i_so_far = 0 # Iterations run so far
while i_so_far < self.training_step: # ALG STEP 2
# Autobots, roll out (just kidding, we're collecting our batch simulations here)
batch_obs, batch_acts, batch_log_probs, batch_rtgs, batch_lens = self.rollout(subpolicy,failure_observations) # ALG STEP 3
# Calculate how many timesteps we collected this batch
t_so_far += np.sum(batch_lens)
# Increment the number of iterations
i_so_far += 1
# Logging timesteps so far and iterations so far
self.logger['t_so_far'] = t_so_far
self.logger['i_so_far'] = i_so_far
# Calculate advantage at k-th iteration
V, _ = self.evaluate(batch_obs, batch_acts)
A_k = batch_rtgs - V.detach() # ALG STEP 5
# One of the only tricks I use that isn't in the pseudocode. Normalizing advantages
# isn't theoretically necessary, but in practice it decreases the variance of
# our advantages and makes convergence much more stable and faster. I added this because
# solving some environments was too unstable without it.
A_k = (A_k - A_k.mean()) / (A_k.std() + 1e-10)
# This is the loop where we update our network for some n epochs
for _ in range(self.n_updates_per_iteration): # ALG STEP 6 & 7
# Calculate V_phi and pi_theta(a_t | s_t)
V, curr_log_probs = self.evaluate(batch_obs, batch_acts)
# Calculate the ratio pi_theta(a_t | s_t) / pi_theta_k(a_t | s_t)
# NOTE: we just subtract the logs, which is the same as
# dividing the values and then canceling the log with e^log.
# For why we use log probabilities instead of actual probabilities,
# here's a great explanation:
# https://cs.stackexchange.com/questions/70518/why-do-we-use-the-log-in-gradient-based-reinforcement-algorithms
# TL;DR makes gradient ascent easier behind the scenes.
ratios = torch.exp(curr_log_probs - batch_log_probs)
# Calculate surrogate losses.
#print(f'A_k======================={A_k}')
surr1 = ratios * A_k
#print(f'surr1======================={surr1}')
surr2 = torch.clamp(ratios, 1 - self.clip, 1 + self.clip) * A_k
#print(f'surr2======================={surr2}')
# Calculate actor and critic losses.
# NOTE: we take the negative min of the surrogate losses because we're trying to maximize
# the performance function, but Adam minimizes the loss. So minimizing the negative
# performance function maximizes it.
actor_loss = (-torch.min(surr1, surr2)).mean()
#print(f'actor_loss======================={actor_loss}')
critic_loss = nn.MSELoss()(V, batch_rtgs)
# Calculate gradients and perform backward propagation for actor network
self.actor_optim.zero_grad()
actor_loss.backward(retain_graph=True)
self.actor_optim.step()
# Calculate gradients and perform backward propagation for critic network
self.critic_optim.zero_grad()
critic_loss.backward()
self.critic_optim.step()
# Log actor loss
self.logger['actor_losses'].append(actor_loss.detach())
self.logger['actor_network'] = self.actor
# Print a summary of our training so far
self._log_summary()
# Save our model if it's time
if i_so_far % self.save_freq == 0:
if subpolicy:
torch.save(self.actor.state_dict(), './ppo_actor_subpolicy'+env_name+'.pth')
torch.save(self.critic.state_dict(), './ppo_critic_subpolicy'+env_name+'.pth')
else:
torch.save(self.actor.state_dict(), './ppo_actor'+env_name+'.pth')
torch.save(self.critic.state_dict(), './ppo_critic'+env_name+'.pth')
def rollout(self,subpolicy,failure_observations):
"""
This is where we collect the batch of data
from simulation. Since this is an on-policy algorithm, we'll need to collect a fresh batch
of data each time we iterate the actor/critic networks.
Parameters:
subpolicy, failure_observations - passed through from learn (not used while collecting this batch)
Return:
batch_obs - the observations collected this batch. Shape: (number of timesteps, dimension of observation)
batch_acts - the actions collected this batch. Shape: (number of timesteps, dimension of action)
batch_log_probs - the log probabilities of each action taken this batch. Shape: (number of timesteps)
batch_rtgs - the Rewards-To-Go of each timestep in this batch. Shape: (number of timesteps)
batch_lens - the lengths of each episode this batch. Shape: (number of episodes)
"""
# Batch data. For more details, check function header.
batch_obs = []
batch_acts = []
batch_log_probs = []
batch_rews = []
batch_rtgs = []
batch_lens = []
batch_infractions = []
# Episodic data. Keeps track of rewards per episode, will get cleared
# upon each new episode
ep_rews = []
t = 0 # Keeps track of how many timesteps we've run so far this batch
# Keep simulating until we've run more than or equal to specified timesteps per batch
while t < self.timesteps_per_batch:
act_list = []
ep_rews = [] # rewards collected per episode
# Reset the environment. Note that obs is short for observation.
obs = self.env.reset()
#print(f'obs reset ============= {obs}')
done = False
count_infractions = 0
count_infractions_acc = 0
count_infractions_steer = 0
# Run an episode for a maximum of max_timesteps_per_episode timesteps
for ep_t in range(self.max_timesteps_per_episode):
a_predicted_clf = clf_control(self.env.v_ego)
delta, target_id, crosstrack_error = self.env.car.tracker.stanley_control(self.env.x_ego, self.env.y_ego, self.env.yaw_ego, self.env.v_ego, self.env.delta_ego)
# If render is specified, render the environment
if self.render:
self.env.render()
t += 1 # Increment timesteps ran this batch so far
# Track observations in this batch
batch_obs.append(obs)
# Calculate action and make a step in the env.
# Note that rew is short for reward.
if self.discrete:
action, log_prob = self.get_action_discrete(obs)
else:
action, log_prob = self.get_action(obs) #self.get_action_discrete(obs)
#print(f'action chosen =============== {action}')
if(abs(round(float(action[0]),1))<abs(round(float(a_predicted_clf),1))):
count_infractions_acc = count_infractions_acc+1
if(abs(round(float(action[1]),1)) < abs(round(float(delta),1))-0.2):
#print(f'After rounding =============== {round(float(action_net[1]),1)} ====== {round(float(action[1]),1)}')
count_infractions_steer = count_infractions_steer+1
obs, rew, done, info = self.env.step(action)
count_infractions = count_infractions_acc+count_infractions_steer
# Track recent reward, action, and action log probability
ep_rews.append(rew)
batch_acts.append(action)
batch_log_probs.append(log_prob)
act_list.append(info)
# If the environment tells us the episode is terminated, break
if done:
break
# Track episodic lengths and rewards
#self.env.render(act_list)
batch_lens.append(ep_t + 1)
batch_rews.append(ep_rews)
batch_infractions.append(count_infractions)
# Reshape data as tensors in the shape specified in function description, before returning
batch_obs = torch.tensor(batch_obs, dtype=torch.float)
#print(f'batch_acts =============== {batch_acts}')
#For discrete state space
if self.discrete:
batch_acts = torch.tensor(batch_acts, dtype=torch.long).view(-1,)
else:
batch_acts = torch.tensor(batch_acts, dtype=torch.float) #torch.tensor(batch_acts, dtype=torch.long).view(-1,)
#print(f'batch_acts =============== {batch_acts}')
batch_log_probs = torch.tensor(batch_log_probs, dtype=torch.float)
batch_rtgs = self.compute_rtgs(batch_rews) # ALG STEP 4
# Log the episodic returns and episodic lengths in this batch.
self.logger['batch_rews'] = batch_rews
self.logger['batch_lens'] = batch_lens
self.logger['batch_infractions'] = batch_infractions
return batch_obs, batch_acts, batch_log_probs, batch_rtgs, batch_lens
def compute_rtgs(self, batch_rews):
"""
Compute the Reward-To-Go of each timestep in a batch given the rewards.
Parameters:
batch_rews - the rewards in a batch, Shape: (number of episodes, number of timesteps per episode)
Return:
batch_rtgs - the rewards to go, Shape: (number of timesteps in batch)
"""
# The rewards-to-go (rtg) per episode per batch to return.
# The shape will be (num timesteps per episode)
batch_rtgs = []
# Iterate through each episode
for ep_rews in reversed(batch_rews):
discounted_reward = 0 # The discounted reward so far
# Iterate through all rewards in the episode. We go backwards for smoother calculation of each
# discounted return (think about why it would be harder starting from the beginning)
for rew in reversed(ep_rews):
discounted_reward = rew + discounted_reward * self.gamma
batch_rtgs.insert(0, discounted_reward)
# Convert the rewards-to-go into a tensor
batch_rtgs = torch.tensor(batch_rtgs, dtype=torch.float)
return batch_rtgs
# Probability sampling for discrete actions
def get_action_discrete(self, obs):
#print(f'obs ================== {obs}')
mean = self.actor(obs)
#print(f'mean ================== {mean}')
dist = Categorical(mean)
#print(f'dist ================== {dist}')
action = dist.sample()
log_prob = dist.log_prob(action)
#print(f'action ====== {action} ========= {log_prob}')
return action.detach().numpy().item(), log_prob.detach().item()
def get_action(self, obs):
"""
Queries an action from the actor network, should be called from rollout.
Parameters:
obs - the observation at the current timestep
Return:
action - the action to take, as a numpy array
log_prob - the log probability of the selected action in the distribution
"""
# Query the actor network for a mean action
mean = self.actor(obs)
# Create a distribution with the mean action and std from the covariance matrix above.
# For more information on how this distribution works, check out Andrew Ng's lecture on it:
# https://www.youtube.com/watch?v=JjB58InuTqM
dist = MultivariateNormal(mean, self.cov_mat)
# Sample an action from the distribution
action = dist.sample()
# Calculate the log probability for that action
log_prob = dist.log_prob(action)
# Return the sampled action and the log probability of that action in our distribution
return action.detach().numpy(), log_prob.detach()
def evaluate(self, batch_obs, batch_acts):
"""
Estimate the values of each observation, and the log probs of
each action in the most recent batch with the most recent
iteration of the actor network. Should be called from learn.
Parameters:
batch_obs - the observations from the most recently collected batch as a tensor.
Shape: (number of timesteps in batch, dimension of observation)
batch_acts - the actions from the most recently collected batch as a tensor.
Shape: (number of timesteps in batch, dimension of action)
Return:
V - the predicted values of batch_obs
log_probs - the log probabilities of the actions taken in batch_acts given batch_obs
"""
# Query critic network for a value V for each batch_obs. Shape of V should be same as batch_rtgs
V = self.critic(batch_obs).squeeze()
# Calculate the log probabilities of batch actions using most recent actor network.
# This segment of code is similar to that in get_action()
mean = self.actor(batch_obs)
if self.discrete:
dist = Categorical(mean)
else:
dist = MultivariateNormal(mean, self.cov_mat)
#For discrete actions
#dist = Categorical(mean)
log_probs = dist.log_prob(batch_acts)
# Return the value vector V of each observation in the batch
# and log probabilities log_probs of each action in the batch
return V, log_probs
def _init_hyperparameters(self, hyperparameters):
"""
Initialize default and custom values for hyperparameters
Parameters:
hyperparameters - the extra arguments included when creating the PPO model, should only include
hyperparameters defined below with custom values.
Return:
None
"""
# Initialize default values for hyperparameters
# Algorithm hyperparameters
self.timesteps_per_batch = 4800 # Number of timesteps to run per batch
self.max_timesteps_per_episode = 1600 # Max number of timesteps per episode
self.n_updates_per_iteration = 5 # Number of times to update actor/critic per iteration
self.lr = 0.005 # Learning rate of actor optimizer
self.gamma = 0.95 # Discount factor to be applied when calculating Rewards-To-Go
self.clip = 0.2 # Recommended 0.2, helps define the threshold to clip the ratio during SGA
# Miscellaneous parameters
self.render = False # If we should render during rollout
self.save_freq = 10 # How often we save in number of iterations
self.seed = None # Sets the seed of our program, used for reproducibility of results
self.discrete = False # Sets the type of environment to discrete or continuous
self.training_step = 200 # Sets the number of training iterations
# Change any default values to custom values for specified hyperparameters
for param, val in hyperparameters.items():
setattr(self, param, val)  # works for strings and other non-numeric values as well
# Sets the seed if specified
if self.seed is not None:
# Check if our seed is valid first
assert(type(self.seed) == int)
# Set the seed
torch.manual_seed(self.seed)
print(f"Successfully set seed to {self.seed}")
# MASKED: _log_summary function (lines 439-488)
def test(env, actor_model, is_discrete):
"""
Tests the model.
Parameters:
env - the environment to test the policy on
actor_model - the actor model to load in
is_discrete - whether the environment's action space is discrete
Return:
None
"""
print(f"Testing {actor_model}", flush=True)
# If the actor model is not specified, then exit
if actor_model == '':
print(f"Didn't specify model file. Exiting.", flush=True)
sys.exit(0)
# Extract out dimensions of observation and action spaces
obs_dim = env.observation_space.shape[0]
if is_discrete:
act_dim = env.action_space.n
else:
act_dim = env.action_space.shape[0] #env.action_space.n #env.action_space.shape[0]
# Build our policy the same way we build our actor model in PPO
policy = FeedForwardActorNN(obs_dim, act_dim,is_discrete)
# Load in the actor model saved by the PPO algorithm
policy.load_state_dict(torch.load(actor_model))
# Evaluate our policy with a separate module, eval_policy, to demonstrate
# that once we are done training the model/policy with ppo.py, we no longer need
# ppo.py since it only contains the training algorithm. The model/policy itself exists
# independently as a binary file that can be loaded in with torch.
eval_policy(policy=policy, env=env, render=True, is_discrete=is_discrete)
|
def _log_summary(self):
"""
Print to stdout what we've logged so far in the most recent batch.
Parameters:
None
Return:
None
"""
# Calculate logging values. I use a few python shortcuts to calculate each value
# without explaining since it's not too important to PPO; feel free to look it over,
# and if you have any questions you can email me (look at bottom of README)
t_so_far = self.logger['t_so_far']
i_so_far = self.logger['i_so_far']
avg_ep_lens = np.mean(self.logger['batch_lens'])
avg_ep_rews = np.mean([np.sum(ep_rews) for ep_rews in self.logger['batch_rews']])
avg_actor_loss = np.mean([losses.float().mean() for losses in self.logger['actor_losses']])
avg_ep_infractions = np.mean([np.sum(ep_inf) for ep_inf in self.logger['batch_infractions']])
actor_model = self.logger['actor_network']
# Round decimal places for more aesthetic logging messages
avg_ep_lens = str(round(avg_ep_lens, 2))
avg_ep_rews = str(round(avg_ep_rews, 2))
avg_ep_infractions = str(round(avg_ep_infractions, 2))
avg_actor_loss = str(round(avg_actor_loss, 5))
writer.add_scalar("Average Episodic Return", int(float(avg_ep_rews)), t_so_far)
writer.add_scalar("Average actor Loss", int(float(avg_actor_loss)), t_so_far)
writer.add_scalar("Average Infractions", int(float(avg_ep_infractions)), t_so_far)
# Tracking the weight of the network
for name, param in actor_model.named_parameters():
if 'weight' in name:
writer.add_histogram(name, param.detach().numpy(), t_so_far)
# Print logging statements
print(flush=True)
print(f"-------------------- Iteration #{i_so_far} --------------------", flush=True)
print(f"Average Episodic Length: {avg_ep_lens}", flush=True)
print(f"Average Episodic Return: {avg_ep_rews}", flush=True)
print(f"Average Episodic Infractions : {avg_ep_infractions}", flush=True)
print(f"Average Loss: {avg_actor_loss}", flush=True)
print(f"Timesteps So Far: {t_so_far}", flush=True)
print(f"------------------------------------------------------", flush=True)
print(flush=True)
# Reset batch-specific logging data
self.logger['batch_lens'] = []
self.logger['batch_rews'] = []
self.logger['actor_losses'] = []
| 439 | 488 |
"""
The file contains the PPO class to train with.
NOTE: All "ALG STEP"s are following the numbers from the original PPO pseudocode.
It can be found here: https://spinningup.openai.com/en/latest/_images/math/e62a8971472597f4b014c2da064f636ffe365ba3.svg
"""
import gym
import numpy as np
import torch
import torch.nn as nn
from torch.optim import Adam
#For continuous actions
from torch.distributions import MultivariateNormal
#For discrete action_space
from torch.distributions import Categorical
from network import FeedForwardActorNN, FeedForwardCriticNN
import sys
from cbf_clf_helper import clf_control, cbf_control
#Integrating tensorboard
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter()
class PPO:
"""
This is the PPO class we will use as our model in main.py
"""
def __init__(self, env, **hyperparameters):
"""
Initializes the PPO model, including hyperparameters.
Parameters:
env - the environment to train on.
hyperparameters - all extra arguments passed into PPO that should be hyperparameters.
Returns:
None
"""
# Make sure the environment is compatible with our code
assert(type(env.observation_space) == gym.spaces.Box)
#assert(type(env.action_space) == gym.spaces.Box)  # commented out: discrete action spaces are also supported
# Initialize hyperparameters for training with PPO
self._init_hyperparameters(hyperparameters)
# Extract environment information
self.env = env
self.obs_dim = env.observation_space.shape[0]
if self.discrete:
self.act_dim = env.action_space.n
else:
self.act_dim = env.action_space.shape[0] #env.action_space.n #env.action_space.shape[0]
# Initialize actor and critic networks
self.actor = FeedForwardActorNN(self.obs_dim, self.act_dim,self.discrete)
# Load a pre-trained lane-keeping actor; note that the local 'actor_model'/'policy'
# variables below are not referenced again in __init__ (self.actor stays as initialized above)
actor_model = 'ppo_actorKinematicBicycleGymLane.pth'
policy = FeedForwardActorNN(5, 2,False)
policy.load_state_dict(torch.load(actor_model))
actor_model = policy
#print(f'model =========== {self.actor}') # ALG STEP 1
self.critic = FeedForwardCriticNN(self.obs_dim, 1)
#print(f'critic =========== {self.critic}')
# Initialize optimizers for actor and critic
self.actor_optim = Adam(self.actor.parameters(), lr=self.lr)
self.critic_optim = Adam(self.critic.parameters(), lr=self.lr)
# Initialize the covariance matrix used to query the actor for actions
self.cov_var = torch.full(size=(self.act_dim,), fill_value=0.05)
self.cov_mat = torch.diag(self.cov_var)
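# With a constant diagonal covariance, every action dimension gets variance 0.05,
# i.e. a fixed exploration std of sqrt(0.05) ~ 0.22. For act_dim = 2 this is simply:
#   cov_mat = [[0.05, 0.00],
#              [0.00, 0.05]]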
self.obs_count = 0
self.index_count = 0
# This logger will help us with printing out summaries of each iteration
self.logger = {
't_so_far': 0, # timesteps so far
'i_so_far': 0, # iterations so far
'batch_lens': [], # episodic lengths in batch
'batch_rews': [], # episodic returns in batch
'batch_infractions': [], # episodic infraction counts in batch
'actor_losses': [], # losses of actor network in current iteration
'actor_network' : 0, # Actor network
}
def learn(self, env_name,failure_observations,subpolicy):
"""
Train the actor and critic networks. Here is where the main PPO algorithm resides.
Parameters:
env_name - name suffix used for the saved actor/critic checkpoint files
failure_observations - passed through to rollout (not used by the rollout code shown here)
subpolicy - if True, checkpoints are saved under the '_subpolicy' file names
Return:
None
"""
print(f"Learning... Running {self.max_timesteps_per_episode} timesteps per episode, ", end='')
print(f"{self.timesteps_per_batch} timesteps per batch for a total of {self.training_step} iterations")
t_so_far = 0 # Timesteps simulated so far
i_so_far = 0 # Iterations run so far
while i_so_far < self.training_step: # ALG STEP 2
# Autobots, roll out (just kidding, we're collecting our batch simulations here)
batch_obs, batch_acts, batch_log_probs, batch_rtgs, batch_lens = self.rollout(subpolicy,failure_observations) # ALG STEP 3
# Calculate how many timesteps we collected this batch
t_so_far += np.sum(batch_lens)
# Increment the number of iterations
i_so_far += 1
# Logging timesteps so far and iterations so far
self.logger['t_so_far'] = t_so_far
self.logger['i_so_far'] = i_so_far
# Calculate advantage at k-th iteration
V, _ = self.evaluate(batch_obs, batch_acts)
A_k = batch_rtgs - V.detach() # ALG STEP 5
# One of the only tricks I use that isn't in the pseudocode. Normalizing advantages
# isn't theoretically necessary, but in practice it decreases the variance of
# our advantages and makes convergence much more stable and faster. I added this because
# solving some environments was too unstable without it.
A_k = (A_k - A_k.mean()) / (A_k.std() + 1e-10)
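# The 1e-10 term only guards against division by zero in the rare case that every advantage
# in the batch is identical; after this step A_k has (roughly) zero mean and unit std.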
# This is the loop where we update our network for some n epochs
for _ in range(self.n_updates_per_iteration): # ALG STEP 6 & 7
# Calculate V_phi and pi_theta(a_t | s_t)
V, curr_log_probs = self.evaluate(batch_obs, batch_acts)
# Calculate the ratio pi_theta(a_t | s_t) / pi_theta_k(a_t | s_t)
# NOTE: we just subtract the logs, which is the same as
# dividing the values and then canceling the log with e^log.
# For why we use log probabilities instead of actual probabilities,
# here's a great explanation:
# https://cs.stackexchange.com/questions/70518/why-do-we-use-the-log-in-gradient-based-reinforcement-algorithms
# TL;DR makes gradient ascent easier behind the scenes.
ratios = torch.exp(curr_log_probs - batch_log_probs)
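# Worked example: if the new policy assigns probability 0.2 to an action the old policy
# assigned 0.1, then exp(log 0.2 - log 0.1) = 0.2 / 0.1 = 2.0, so that sample's advantage
# is weighted twice as strongly (before clipping below).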
# Calculate surrogate losses.
#print(f'A_k======================={A_k}')
surr1 = ratios * A_k
#print(f'surr1======================={surr1}')
surr2 = torch.clamp(ratios, 1 - self.clip, 1 + self.clip) * A_k
#print(f'surr2======================={surr2}')
# Calculate actor and critic losses.
# NOTE: we take the negative min of the surrogate losses because we're trying to maximize
# the performance function, but Adam minimizes the loss. So minimizing the negative
# performance function maximizes it.
actor_loss = (-torch.min(surr1, surr2)).mean()
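# In equation form this is the PPO clipped surrogate
#   L_CLIP = E[ min(r_t * A_t, clip(r_t, 1 - eps, 1 + eps) * A_t) ]   with eps = self.clip = 0.2.
# Example with A_t = 1: a ratio r_t = 1.5 contributes min(1.5, 1.2) = 1.2, so pushing the
# policy further than the clip range earns no additional objective.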
#print(f'actor_loss======================={actor_loss}')
critic_loss = nn.MSELoss()(V, batch_rtgs)
# Calculate gradients and perform backward propagation for actor network
self.actor_optim.zero_grad()
actor_loss.backward(retain_graph=True)
self.actor_optim.step()
# Calculate gradients and perform backward propagation for critic network
self.critic_optim.zero_grad()
critic_loss.backward()
self.critic_optim.step()
# Log actor loss
self.logger['actor_losses'].append(actor_loss.detach())
self.logger['actor_network'] = self.actor
# Print a summary of our training so far
self._log_summary()
# Save our model if it's time
if i_so_far % self.save_freq == 0:
if subpolicy:
torch.save(self.actor.state_dict(), './ppo_actor_subpolicy'+env_name+'.pth')
torch.save(self.critic.state_dict(), './ppo_critic_subpolicy'+env_name+'.pth')
else:
torch.save(self.actor.state_dict(), './ppo_actor'+env_name+'.pth')
torch.save(self.critic.state_dict(), './ppo_critic'+env_name+'.pth')
def rollout(self,subpolicy,failure_observations):
"""
This is where we collect the batch of data
from simulation. Since this is an on-policy algorithm, we'll need to collect a fresh batch
of data each time we iterate the actor/critic networks.
Parameters:
subpolicy, failure_observations - passed through from learn (not used while collecting this batch)
Return:
batch_obs - the observations collected this batch. Shape: (number of timesteps, dimension of observation)
batch_acts - the actions collected this batch. Shape: (number of timesteps, dimension of action)
batch_log_probs - the log probabilities of each action taken this batch. Shape: (number of timesteps)
batch_rtgs - the Rewards-To-Go of each timestep in this batch. Shape: (number of timesteps)
batch_lens - the lengths of each episode this batch. Shape: (number of episodes)
"""
# Batch data. For more details, check function header.
batch_obs = []
batch_acts = []
batch_log_probs = []
batch_rews = []
batch_rtgs = []
batch_lens = []
batch_infractions = []
# Episodic data. Keeps track of rewards per episode, will get cleared
# upon each new episode
ep_rews = []
t = 0 # Keeps track of how many timesteps we've run so far this batch
# Keep simulating until we've run more than or equal to specified timesteps per batch
while t < self.timesteps_per_batch:
act_list = []
ep_rews = [] # rewards collected per episode
# Reset the environment. Note that obs is short for observation.
obs = self.env.reset()
#print(f'obs reset ============= {obs}')
done = False
count_infractions = 0
count_infractions_acc = 0
count_infractions_steer = 0
# Run an episode for a maximum of max_timesteps_per_episode timesteps
for ep_t in range(self.max_timesteps_per_episode):
a_predicted_clf = clf_control(self.env.v_ego)
delta, target_id, crosstrack_error = self.env.car.tracker.stanley_control(self.env.x_ego, self.env.y_ego, self.env.yaw_ego, self.env.v_ego, self.env.delta_ego)
# If render is specified, render the environment
if self.render:
self.env.render()
t += 1 # Increment timesteps ran this batch so far
# Track observations in this batch
batch_obs.append(obs)
# Calculate action and make a step in the env.
# Note that rew is short for reward.
if self.discrete:
action, log_prob = self.get_action_discrete(obs)
else:
action, log_prob = self.get_action(obs) #self.get_action_discrete(obs)
#print(f'action chosen =============== {action}')
if(abs(round(float(action[0]),1))<abs(round(float(a_predicted_clf),1))):
count_infractions_acc = count_infractions_acc+1
if(abs(round(float(action[1]),1)) < abs(round(float(delta),1))-0.2):
#print(f'After rounding =============== {round(float(action_net[1]),1)} ====== {round(float(action[1]),1)}')
count_infractions_steer = count_infractions_steer+1
obs, rew, done, info = self.env.step(action)
count_infractions = count_infractions_acc+count_infractions_steer
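# As implemented, an "infraction" is counted whenever the learned acceleration magnitude is
# smaller than the CLF reference a_predicted_clf, or the learned steering magnitude is smaller
# than the Stanley reference delta minus a 0.2 tolerance (both compared after rounding to 1 decimal).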
# Track recent reward, action, and action log probability
ep_rews.append(rew)
batch_acts.append(action)
batch_log_probs.append(log_prob)
act_list.append(info)
# If the environment tells us the episode is terminated, break
if done:
break
# Track episodic lengths and rewards
#self.env.render(act_list)
batch_lens.append(ep_t + 1)
batch_rews.append(ep_rews)
batch_infractions.append(count_infractions)
# Reshape data as tensors in the shape specified in function description, before returning
batch_obs = torch.tensor(batch_obs, dtype=torch.float)
#print(f'batch_acts =============== {batch_acts}')
#For discrete state space
if self.discrete:
batch_acts = torch.tensor(batch_acts, dtype=torch.long).view(-1,)
else:
batch_acts = torch.tensor(batch_acts, dtype=torch.float) #torch.tensor(batch_acts, dtype=torch.long).view(-1,)
#print(f'batch_acts =============== {batch_acts}')
batch_log_probs = torch.tensor(batch_log_probs, dtype=torch.float)
batch_rtgs = self.compute_rtgs(batch_rews) # ALG STEP 4
# Log the episodic returns and episodic lengths in this batch.
self.logger['batch_rews'] = batch_rews
self.logger['batch_lens'] = batch_lens
self.logger['batch_infractions'] = batch_infractions
return batch_obs, batch_acts, batch_log_probs, batch_rtgs, batch_lens
def compute_rtgs(self, batch_rews):
"""
Compute the Reward-To-Go of each timestep in a batch given the rewards.
Parameters:
batch_rews - the rewards in a batch, Shape: (number of episodes, number of timesteps per episode)
Return:
batch_rtgs - the rewards to go, Shape: (number of timesteps in batch)
"""
# The rewards-to-go (rtg) per episode per batch to return.
# The shape will be (num timesteps per episode)
batch_rtgs = []
# Iterate through each episode
for ep_rews in reversed(batch_rews):
discounted_reward = 0 # The discounted reward so far
# Iterate through all rewards in the episode. We go backwards for smoother calculation of each
# discounted return (think about why it would be harder starting from the beginning)
for rew in reversed(ep_rews):
discounted_reward = rew + discounted_reward * self.gamma
batch_rtgs.insert(0, discounted_reward)
# Convert the rewards-to-go into a tensor
batch_rtgs = torch.tensor(batch_rtgs, dtype=torch.float)
return batch_rtgs
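# Worked example with the default gamma = 0.95 and one episode of rewards [1, 2, 3]:
#   rtg[2] = 3
#   rtg[1] = 2 + 0.95 * 3    = 4.85
#   rtg[0] = 1 + 0.95 * 4.85 = 5.6075
# so compute_rtgs returns tensor([5.6075, 4.85, 3.0]) for that episode.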
# Probability sampling for discrete actions
def get_action_discrete(self, obs):
#print(f'obs ================== {obs}')
mean = self.actor(obs)
#print(f'mean ================== {mean}')
dist = Categorical(mean)
#print(f'dist ================== {dist}')
action = dist.sample()
log_prob = dist.log_prob(action)
#print(f'action ====== {action} ========= {log_prob}')
return action.detach().numpy().item(), log_prob.detach().item()
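# Categorical(mean) treats the actor output as a probability vector over the discrete actions
# (this assumes the network's final activation produces valid probabilities, e.g. a softmax).
# For example, with probs [0.7, 0.2, 0.1], action 0 is sampled with probability 0.7 and its
# log_prob is log(0.7) ~ -0.357.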
def get_action(self, obs):
"""
Queries an action from the actor network, should be called from rollout.
Parameters:
obs - the observation at the current timestep
Return:
action - the action to take, as a numpy array
log_prob - the log probability of the selected action in the distribution
"""
# Query the actor network for a mean action
mean = self.actor(obs)
# Create a distribution with the mean action and std from the covariance matrix above.
# For more information on how this distribution works, check out Andrew Ng's lecture on it:
# https://www.youtube.com/watch?v=JjB58InuTqM
dist = MultivariateNormal(mean, self.cov_mat)
# Sample an action from the distribution
action = dist.sample()
# Calculate the log probability for that action
log_prob = dist.log_prob(action)
# Return the sampled action and the log probability of that action in our distribution
return action.detach().numpy(), log_prob.detach()
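# Because cov_mat is diagonal, the sample is just the mean action plus independent Gaussian
# noise with std sqrt(0.05) ~ 0.22 in each dimension; log_prob is the log density of the
# act_dim-dimensional Gaussian evaluated at the sampled action.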
def evaluate(self, batch_obs, batch_acts):
"""
Estimate the values of each observation, and the log probs of
each action in the most recent batch with the most recent
iteration of the actor network. Should be called from learn.
Parameters:
batch_obs - the observations from the most recently collected batch as a tensor.
Shape: (number of timesteps in batch, dimension of observation)
batch_acts - the actions from the most recently collected batch as a tensor.
Shape: (number of timesteps in batch, dimension of action)
Return:
V - the predicted values of batch_obs
log_probs - the log probabilities of the actions taken in batch_acts given batch_obs
"""
# Query critic network for a value V for each batch_obs. Shape of V should be same as batch_rtgs
V = self.critic(batch_obs).squeeze()
# Calculate the log probabilities of batch actions using most recent actor network.
# This segment of code is similar to that in get_action()
mean = self.actor(batch_obs)
if self.discrete:
dist = Categorical(mean)
else:
dist = MultivariateNormal(mean, self.cov_mat)
#For discrete actions
#dist = Categorical(mean)
log_probs = dist.log_prob(batch_acts)
# Return the value vector V of each observation in the batch
# and log probabilities log_probs of each action in the batch
return V, log_probs
def _init_hyperparameters(self, hyperparameters):
"""
Initialize default and custom values for hyperparameters
Parameters:
hyperparameters - the extra arguments included when creating the PPO model, should only include
hyperparameters defined below with custom values.
Return:
None
"""
# Initialize default values for hyperparameters
# Algorithm hyperparameters
self.timesteps_per_batch = 4800 # Number of timesteps to run per batch
self.max_timesteps_per_episode = 1600 # Max number of timesteps per episode
self.n_updates_per_iteration = 5 # Number of times to update actor/critic per iteration
self.lr = 0.005 # Learning rate of actor optimizer
self.gamma = 0.95 # Discount factor to be applied when calculating Rewards-To-Go
self.clip = 0.2 # Recommended 0.2, helps define the threshold to clip the ratio during SGA
# Miscellaneous parameters
self.render = False # If we should render during rollout
self.save_freq = 10 # How often we save in number of iterations
self.seed = None # Sets the seed of our program, used for reproducibility of results
self.discrete = False # Sets the type of environment to discrete or continuous
self.training_step = 200 # Sets the number of training iterations
# Change any default values to custom values for specified hyperparameters
for param, val in hyperparameters.items():
setattr(self, param, val)  # works for strings and other non-numeric values as well
# Sets the seed if specified
if self.seed is not None:
# Check if our seed is valid first
assert(type(self.seed) == int)
# Set the seed
torch.manual_seed(self.seed)
print(f"Successfully set seed to {self.seed}")
def _log_summary(self):
"""
Print to stdout what we've logged so far in the most recent batch.
Parameters:
None
Return:
None
"""
# Calculate logging values. I use a few python shortcuts to calculate each value
# without explaining since it's not too important to PPO; feel free to look it over,
# and if you have any questions you can email me (look at bottom of README)
t_so_far = self.logger['t_so_far']
i_so_far = self.logger['i_so_far']
avg_ep_lens = np.mean(self.logger['batch_lens'])
avg_ep_rews = np.mean([np.sum(ep_rews) for ep_rews in self.logger['batch_rews']])
avg_actor_loss = np.mean([losses.float().mean() for losses in self.logger['actor_losses']])
avg_ep_infractions = np.mean([np.sum(ep_inf) for ep_inf in self.logger['batch_infractions']])
actor_model = self.logger['actor_network']
# Round decimal places for more aesthetic logging messages
avg_ep_lens = str(round(avg_ep_lens, 2))
avg_ep_rews = str(round(avg_ep_rews, 2))
avg_ep_infractions = str(round(avg_ep_infractions, 2))
avg_actor_loss = str(round(avg_actor_loss, 5))
writer.add_scalar("Average Episodic Return", int(float(avg_ep_rews)), t_so_far)
writer.add_scalar("Average actor Loss", int(float(avg_actor_loss)), t_so_far)
writer.add_scalar("Average Infractions", int(float(avg_ep_infractions)), t_so_far)
# Tracking the weight of the network
for name, param in actor_model.named_parameters():
if 'weight' in name:
writer.add_histogram(name, param.detach().numpy(), t_so_far)
# Print logging statements
print(flush=True)
print(f"-------------------- Iteration #{i_so_far} --------------------", flush=True)
print(f"Average Episodic Length: {avg_ep_lens}", flush=True)
print(f"Average Episodic Return: {avg_ep_rews}", flush=True)
print(f"Average Episodic Infractions : {avg_ep_infractions}", flush=True)
print(f"Average Loss: {avg_actor_loss}", flush=True)
print(f"Timesteps So Far: {t_so_far}", flush=True)
print(f"------------------------------------------------------", flush=True)
print(flush=True)
# Reset batch-specific logging data
self.logger['batch_lens'] = []
self.logger['batch_rews'] = []
self.logger['actor_losses'] = []
def test(env, actor_model, is_discrete):
"""
Tests the model.
Parameters:
env - the environment to test the policy on
actor_model - the actor model to load in
is_discrete - whether the environment's action space is discrete
Return:
None
"""
print(f"Testing {actor_model}", flush=True)
# If the actor model is not specified, then exit
if actor_model == '':
print(f"Didn't specify model file. Exiting.", flush=True)
sys.exit(0)
# Extract out dimensions of observation and action spaces
obs_dim = env.observation_space.shape[0]
if is_discrete:
act_dim = env.action_space.n
else:
act_dim = env.action_space.shape[0] #env.action_space.n #env.action_space.shape[0]
# Build our policy the same way we build our actor model in PPO
policy = FeedForwardActorNN(obs_dim, act_dim,is_discrete)
# Load in the actor model saved by the PPO algorithm
policy.load_state_dict(torch.load(actor_model))
# Evaluate our policy with a separate module, eval_policy, to demonstrate
# that once we are done training the model/policy with ppo.py, we no longer need
# ppo.py since it only contains the training algorithm. The model/policy itself exists
# independently as a binary file that can be loaded in with torch.
eval_policy(policy=policy, env=env, render=True, is_discrete=is_discrete)
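# A minimal usage sketch (illustrative only: the environment id, hyperparameter values and the
# empty failure_observations list are assumptions, not taken from this file):
#
#   env = gym.make('KinematicBicycleGymLane-v0')
#   model = PPO(env, timesteps_per_batch=2048, max_timesteps_per_episode=400, lr=3e-4)
#   model.learn(env_name='KinematicBicycleGymLane', failure_observations=[], subpolicy=False)
#   test(env, actor_model='ppo_actorKinematicBicycleGymLane.pth', is_discrete=False)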
|
baseline_re_single_analysis
|
Analyze edge cases of relative errors on a single network
Uses the samp^{th} sample network of size N in test data test_data_id.
Relative errors in the range (0, 2^{cap}) are considered edge cases.
Returns the number of edge cases divided by the difference |T-B| - |B-T| as a percent.
T and B are as defined in (Katz and Reggia 2017).
|
"""
Methods for assessing treatment of finite-precision issues
"""
import os
import sys
import time
import multiprocessing as mp
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.markers as mrk
import plotter as ptr
import rnn_fxpts as rfx
import fxpt_experiments as fe
import pickle as pkl
def get_relative_errors(test_data_id):
"""
Compute and save the relative errors of every point found on every network in a testing set.
Relative error is defined in (Katz and Reggia 2017).
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
"""
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for alg in ['traverse','baseline']:
for (N, S) in zip(network_sizes, num_samples):
for samp in range(S):
print('%s, alg %s, N %d,samp %d'%(test_data_id,alg,N,samp))
npz = np.load('results/%s_%s_N_%d_s_%d.npz'%(alg,test_data_id,N,samp))
W = npz['W']
fxV = npz['fxV']
fxV, converged = rfx.refine_fxpts_capped(W, fxV)
margin = rfx.estimate_forward_error(W, fxV)
f = np.tanh(W.dot(fxV))-fxV
re = np.fabs(f/margin)
re_fx, re_un = re[:,converged].max(axis=0), re[:,~converged].max(axis=0)
re_fx = re_fx[re_fx > 0]
f_fx, f_un = np.fabs(f[:,converged]).max(axis=0), np.fabs(f[:,~converged]).max(axis=0)
f_fx = f_fx[f_fx > 0]
re_npz = {}
re_npz['f_fx'] = f_fx
re_npz['f_un'] = f_un
re_npz['re_fx'] = re_fx
re_npz['re_un'] = re_un
fe.save_npz_file('results/%s_re_%s_N_%d_s_%d.npz'%(alg,test_data_id,N,samp), **re_npz)
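# In the code above, f = tanh(W.v) - v is the fixed-point residual and margin is the estimated
# forward error, so re = |f / margin| < 1 means the residual is within the estimated forward
# error; re_fx and re_un split these values by whether refinement converged.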
def show_traverse_re_fig(test_data_ids, Ns, samp_range):
"""
Plot relative errors from points found by fiber traversal.
test_data_ids and Ns should be length-2 lists.
Subplots in the first column will show errors for networks of size Ns[0] from test_data_ids[0].
Similarly the second column draws from Ns[1], test_data_ids[1].
Each network sample within samp_range is shown on a separate row.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/traverse_re_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
m_fx, m_un = npz['re_fx'], npz['re_un']
ax = plt.subplot(len(samp_range),len(Ns),sp)
sp += 1
if m_un.shape[0] > 0: plt.hist(np.log2(m_un),bins=30,log=log,facecolor='k')
plt.hist(np.log2(m_fx),bins=10,log=log,facecolor='w')
lo = 10*(int(np.log2(m_fx).min()/10)-1)
if m_un.shape[0] > 0: hi = 10*(int(np.log2(m_un).max()/10)+1)
else: hi = 0
plt.xticks(range(-10,1,2),['']+['$2^{%d}$'%yl for yl in range(-8,1,2)])
if N == Ns[0]:
plt.ylabel('# of points')
if samp == samp_range[0]:
ax.set_title('N = %d'%N)
if samp == samp_range[-1]:
plt.xlabel('Fiber Relative Error')
plt.show()
# MASKED: baseline_re_single_analysis function (lines 78-91)
def baseline_re_batch_analysis(test_data_id, Ns, cap=10):
"""
Runs baseline_re_single_analysis on all networks in test_data_id of size N.
cap is as in baseline_re_single_analysis.
returns numpy.array percents, where
percents[i] is as in baseline_re_single_analysis for the i^{th} sample network.
"""
percents = []
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for (N, S) in zip(network_sizes, num_samples):
if N not in Ns: continue
for samp in range(S):
percents.append(baseline_re_single_analysis(test_data_id,N,samp,cap=cap))
percents = np.array(percents)
print('mean %%: %f%%'%percents.mean())
return percents
def show_baseline_re_fig(test_data_ids, Ns, samp_range):
"""
Plot relative errors from points found by the baseline solver.
test_data_ids and Ns should be length-2 lists.
Subplots in the first column will show errors for networks of size Ns[0] from test_data_ids[0].
Similarly the second column draws from Ns[1], test_data_ids[1].
Each network sample within samp_range is shown on a separate row.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/baseline_re_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
m_fx, m_un = npz['re_fx'], npz['re_un']
ax = plt.subplot(len(samp_range),len(Ns),sp)
sp += 1
if m_un.shape[0] > 0: plt.hist(np.log2(m_un),bins=30,log=log,facecolor='k')
plt.hist(np.log2(m_fx),bins=10,log=log,facecolor='w')
lo, hi = -20,50
plt.xticks(range(lo,hi+1,10),[''] + ['$2^{%d}$'%yl for yl in range(lo+10,hi+1,10)])
if N == Ns[0]:
plt.ylabel('# of points')
if samp == samp_range[0]:
ax.set_title('N = %d'%N)
if samp == samp_range[-1]:
plt.xlabel('Baseline Relative Error')
baseline_re_single_analysis(test_data_id, N, samp)
plt.show()
def get_baseline_rd(test_data_id,N,samp,cap,logfilename=os.devnull):
"""
Compute and save relative distances between pairs of points found by the baseline solver.
Relative distance is defined in (Katz and Reggia 2017).
Computes for the samp^{th} sample network of size N in test_data_id.
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
Only pairs within a random subset of points of size cap are inspected.
logfilename is a file name at which progress updates are written.
"""
logfile = open(logfilename,'w')
logfile.write('Running baseline rd (%s,%d,%d)...\n'%(test_data_id,N,samp))
npz = fe.load_npz_file('results/baseline_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
fxV = npz['fxV_converged']
fxV_unique = npz['fxV_unique']
W = npz['W']
if cap is not None and fxV.shape[1] > cap:
logfile.write('capping...\n')
perm = np.random.permutation(fxV.shape[1])
fxV = fxV[:,perm[:cap]]
in_RR, out_RR = [],[]
for j in range(fxV_unique.shape[1]):
logfile.write('duping %d of %d...\n'%(j,fxV_unique.shape[1]))
dups, RR, R = rfx.identical_fixed_points(W, fxV, fxV_unique[:,[j]])
in_RR.append(RR[dups])
out_RR.append(RR[~dups])
in_RR, out_RR = np.concatenate(in_RR), np.concatenate(out_RR)
npz["in_RR"], npz["out_RR"] = in_RR, out_RR
fe.save_npz_file('results/baseline_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp), **npz)
logfile.write('Done.\n')
logfile.close()
print('Done %s %d %d'%(test_data_id,N,samp))
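# For each unique fixed point, identical_fixed_points flags which of the (possibly capped)
# candidate points are duplicates of it; in_RR gathers the relative distances of those
# duplicates and out_RR the relative distances of the non-duplicates.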
def pool_get_baseline_rd(args):
"""
Wrapper function passed to multiprocessing.Pool
"""
get_baseline_rd(*args)
def run_baseline_rd(test_data_id, Ns, num_procs):
"""
Run get_baseline_rd on all networks in test_data_id whose size is in the list Ns.
Multiprocessing is used to run on multiple networks in parallel.
num_procs is the number of processors to use.
"""
cpu_count = mp.cpu_count()
print('%d cpus, using %d'%(cpu_count, num_procs))
pool_args = []
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for (N, S) in zip(network_sizes, num_samples):
if N not in Ns: continue
cap = 20000
for s in range(S):
logfilename = 'logs/baseline_rd_%s_N_%d_s_%d.log'%(test_data_id,N,s)
pool_args.append((test_data_id,N,s,cap,logfilename))
start_time = time.time()
test_fun = pool_get_baseline_rd
if num_procs < 1: # don't multiprocess
for args in pool_args: test_fun(args)
else:
pool = mp.Pool(processes=num_procs)
pool.map(test_fun, pool_args)
pool.close()
pool.join()
print('total time: %f'%(time.time()-start_time))
def get_traverse_rd(test_data_id,N,samp,cap,logfilename=os.devnull):
"""
Compute and save relative distances between pairs of points found by fiber traversal.
Relative distance is defined in (Katz and Reggia 2017).
Computes for the samp^{th} sample network of size N in test_data_id.
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
Only pairs within a random subset of points of size cap are inspected.
logfilename is a file name at which progress updates are written.
"""
logfile = open(logfilename,'w')
logfile.write('Running traverse rd (%s,%d,%d)...\n'%(test_data_id,N,samp))
npz = fe.load_npz_file('results/traverse_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
fxV = npz['fxV_converged']
fxV_unique = npz['fxV_unique']
W = npz['W']
if cap is not None and fxV.shape[1] > cap:
logfile.write('capping...\n')
perm = np.random.permutation(fxV.shape[1])
fxV = fxV[:,perm[:cap]]
in_RR, out_RR = [],[]
for j in range(fxV_unique.shape[1]):
logfile.write('duping %d of %d...\n'%(j,fxV_unique.shape[1]))
dups, RR, R = rfx.identical_fixed_points(W, fxV, fxV_unique[:,[j]])
in_RR.append(RR[dups])
out_RR.append(RR[~dups])
in_RR, out_RR = np.concatenate(in_RR), np.concatenate(out_RR)
npz["in_RR"], npz["out_RR"] = in_RR, out_RR
fe.save_npz_file('results/traverse_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp), **npz)
logfile.write('Done.\n')
logfile.close()
print('Done %s %d %d'%(test_data_id,N,samp))
def pool_get_traverse_rd(args):
"""
Wrapper function passed to multiprocessing.Pool
"""
get_traverse_rd(*args)
def run_traverse_rd(test_data_id, Ns, num_procs):
"""
Run get_traverse_rd on all networks in test_data_id whose size is in the list Ns.
Multiprocessing is used to run on multiple networks in parallel.
num_procs is the number of processors to use.
"""
cpu_count = mp.cpu_count()
print('%d cpus, using %d'%(cpu_count, num_procs))
pool_args = []
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for (N,S) in zip(network_sizes, num_samples):
if N not in Ns: continue
cap = 20000
for s in range(S):
logfilename = 'logs/traverse_rd_%s_N_%d_s_%d.log'%(test_data_id,N,s)
pool_args.append((test_data_id,N,s,cap,logfilename))
start_time = time.time()
test_fun = pool_get_traverse_rd
if num_procs < 1: # don't multiprocess
for args in pool_args: test_fun(args)
else:
pool = mp.Pool(processes=num_procs)
pool.map(test_fun, pool_args)
pool.close()
pool.join()
print('total time: %f'%(time.time()-start_time))
def get_simple_rd(test_data_id,N,samp,cap,logfilename=os.devnull):
"""
Use simple unique test: if max absolute coordinate-wise difference < 2**-32
Compute and save distances between pairs of points found by both solvers.
Computes for the samp^{th} sample network of size N in test_data_id.
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
Only pairs within a random subset of points of size cap are inspected.
Saves pair-wise distance distribution in histogram with one bucket per integer power of 2
logfilename is a file name at which progress updates are written.
"""
logfile = open(logfilename,'w')
rfx.hardwrite(logfile,'Running simple rd (%s,%d,%d)...\n'%(test_data_id,N,samp))
buckets = {}
bins = np.arange(-1025,3)
for method_key in ['traverse','baseline']:
npz = fe.load_npz_file('results/%s_%s_N_%d_s_%d.npz'%(method_key,test_data_id,N,samp))
fxV = npz['fxV_converged']
buckets[method_key] = np.zeros(len(bins)-1)
if cap is not None and fxV.shape[1] > cap:
rfx.hardwrite(logfile,'capping...\n')
perm = np.random.permutation(fxV.shape[1])
fxV = fxV[:,perm[:cap]]
for j in range(fxV.shape[1]):
rfx.hardwrite(logfile,'disting %d of %d...\n'%(j,fxV.shape[1]))
dists = np.fabs(fxV-fxV[:,[j]]).max(axis=0)
dists[dists == 0] = 2.0**bins[0]
logdists = np.log2(dists)
logdists[logdists < bins[0]] = bins[0]
logdists[logdists > bins[-1]] = bins[-1]
hist,_ = np.histogram(logdists,bins=bins)
buckets[method_key] += hist
npz = {'bins':bins,'traverse_buckets':buckets['traverse'],'baseline_buckets':buckets['baseline']}
fe.save_npz_file('results/simple_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp), **npz)
rfx.hardwrite(logfile,'Done.\n')
logfile.close()
print('Done %s %d %d'%(test_data_id,N,samp))
def pool_get_simple_rd(args):
"""
Wrapper function passed to multiprocessing.Pool
"""
get_simple_rd(*args)
def run_simple_rd(test_data_id, Ns, num_procs):
"""
Run get_simple_rd on all networks in test_data_id whose size is in the list Ns.
Multiprocessing is used to run on multiple networks in parallel.
num_procs is the number of processors to use.
"""
cpu_count = mp.cpu_count()
print('%d cpus, using %d'%(cpu_count, num_procs))
pool_args = []
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for (N,S) in zip(network_sizes, num_samples):
if N not in Ns: continue
cap = 1000
for s in range(S):
logfilename = 'logs/simple_rd_%s_N_%d_s_%d.log'%(test_data_id,N,s)
pool_args.append((test_data_id,N,s,cap,logfilename))
start_time = time.time()
test_fun = pool_get_simple_rd
if num_procs < 1: # don't multiprocess
for args in pool_args: test_fun(args)
else:
pool = mp.Pool(processes=num_procs)
pool.map(test_fun, pool_args)
pool.close()
pool.join()
print('total time: %f'%(time.time()-start_time))
def show_traverse_rd_fig(test_data_ids, Ns, samp_range):
"""
Plot relative distances from points found by fiber traversal.
test_ids, Ns, and samp_range should be as in show_traverse_re_fig.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/traverse_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
in_rr, out_rr = npz['in_RR'], npz['out_RR']
if (in_rr > 0).any(): in_rr[in_rr == 0] = in_rr[in_rr > 0].min()
else: in_rr[in_rr == 0] = 2**(-30)
ax = plt.subplot(len(samp_range),len(Ns),sp)
sp += 1
if out_rr.shape[0] > 0: plt.hist(np.log2(out_rr),bins=30,log=log,facecolor='k')
plt.hist(np.log2(in_rr),bins=10,log=log,facecolor='w')
if N == Ns[0]:
plt.ylabel('# of pairs')
if samp == samp_range[0]:
ax.set_title('N = %d'%N)
if samp == samp_range[-1]:
plt.xlabel('Fiber Relative Distance')
plt.xlim([-30,50])
plt.xticks(range(-30,51,10),['']+['$2^{%d}$'%xl for xl in range(-20,51,10)])
plt.show()
def show_baseline_rd_fig(test_data_ids, Ns, samp_range):
"""
Plot relative distances from points found by the baseline solver.
test_ids, Ns, and samp_range should be as in show_baseline_re_fig.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/baseline_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
in_rr, out_rr = npz['in_RR'], npz['out_RR']
if (in_rr > 0).any(): in_rr[in_rr == 0] = in_rr[in_rr > 0].min()
else: in_rr[in_rr == 0] = 2**(-30)
ax = plt.subplot(len(samp_range),len(Ns),sp)
sp += 1
if np.isinf(out_rr).any():
if np.isinf(out_rr).all(): out_rr[:] = 4*in_rr.max()
else: out_rr[np.isinf(out_rr)] = 4*out_rr[~np.isinf(out_rr)].max()
print('out_rr:')
print(out_rr.shape)
print((out_rr==0).sum())
print(np.isinf(in_rr).sum())
print(np.isinf(out_rr).sum())
print(np.isnan(out_rr).sum())
if out_rr.shape[0] > 0: plt.hist(np.log2(out_rr),bins=30,log=log,facecolor='k')
# if out_rr.shape[0] > 0: plt.hist(out_rr,bins=30,facecolor='k')
plt.hist(np.log2(in_rr),bins=10,log=log,facecolor='w')
# plt.hist(in_rr,bins=10,facecolor='w')
if N == Ns[0]:
plt.ylabel('# of pairs')
if samp == samp_range[0]:
ax.set_title('N = %d'%N)
if samp == samp_range[-1]:
plt.xlabel('Baseline Relative Distance')
plt.xlim([-30,50])
plt.xticks(range(-30,51,10),['']+['$2^{%d}$'%xl for xl in range(-20,51,10)])
plt.show()
def show_simple_rd_all_fig(test_data_ids, Ns, samp_range):
"""
Plot relative distances from points found by fiber traversal or baseline.
test_ids, Ns, and samp_range should be as in show_traverse_re_fig.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42
buckets = None
bins = None
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/simple_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
if buckets is None:
buckets = np.zeros(npz['traverse_buckets'].shape)
bins = npz['bins']
buckets += npz['traverse_buckets']
buckets += npz['baseline_buckets']
plt.figure(figsize=(8,2.4))
# plt.hist(buckets,bins=bins,log=log)
if log:
buckets[buckets > 0] = np.log2(buckets[buckets > 0])
plt.bar(left=bins[:-1],height=buckets,width=bins[1:]-bins[:-1],facecolor='none')
plt.ylabel('# of pairs')
plt.xlabel('$max_i|v_i^{(1)}-v_i^{(2)}|$') #'Max Coordinate-wise Distance')
xmin_idx = int(((bins[:-1] > -1000) & (buckets > 0)).argmax())
xstep = int(np.ceil((bins[-1]-bins[xmin_idx])/10))
plt.xticks(bins[xmin_idx::xstep],['$2^{%d}$'%xl for xl in bins[xmin_idx::xstep]])
plt.xlim([bins[xmin_idx]-xstep,bins[-1]+xstep])
if log:
ymax = np.ceil(buckets.max())+1
ystep = np.ceil(ymax/5)
plt.yticks(np.arange(0,ymax+ystep,ystep),['$2^{%d}$'%yl for yl in np.arange(0,ymax+ystep,ystep)])
plt.ylim([0,ymax+1])
plt.tight_layout()
plt.show()
|
def baseline_re_single_analysis(test_data_id, N, samp, cap=10):
"""
Analyze edge cases of relative errors on a single network
Uses the samp^{th} sample network of size N in test data test_data_id.
Relative errors in the range (0, 2^{cap}) are considered edge cases.
Returns the number of edge cases divided by the difference |T-B| - |B-T| as a percent.
T and B are as defined in (Katz and Reggia 2017).
"""
npz = fe.load_npz_file('results/baseline_re_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
res = fe.load_pkl_file('results/TvB_%s_N_%d_s_%d.pkl'%(test_data_id, N, samp))
re_un = npz['re_un']
percent = 100.*(re_un < 2**cap).sum()/np.array(res['T-B']-res['B-T'])
print('N=%d, samp %d: B-T = %d, T-B = %d, %d (%f%%) possibly unique slow RE(B) < 2**%d'%(N, samp, res['B-T'], res['T-B'],(re_un < 2**cap).sum(), percent, cap))
return percent
| 78 | 91 |
"""
Methods for assessing treatment of finite-precision issues
"""
import os
import sys
import time
import multiprocessing as mp
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.markers as mrk
import plotter as ptr
import rnn_fxpts as rfx
import fxpt_experiments as fe
import pickle as pkl
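# Typical workflow (an assumption inferred from the functions below, not
# prescribed by the module itself): relative errors are computed once per test
# set with get_relative_errors, pairwise distances with the run_*_rd drivers,
# and the show_*_fig functions then plot the saved results/*.npz files.
# A minimal sketch with a hypothetical test_data_id:
#
#     get_relative_errors('mytest')
#     run_baseline_rd('mytest', Ns=[10, 20], num_procs=2)
#     run_traverse_rd('mytest', Ns=[10, 20], num_procs=2)
#     run_simple_rd('mytest', Ns=[10, 20], num_procs=2)
#     show_traverse_re_fig(['mytest', 'mytest'], [10, 20], range(3))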
def get_relative_errors(test_data_id):
"""
Compute and save the relative errors of every point found on every network in a testing set.
Relative error is defined in (Katz and Reggia 2017).
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
"""
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for alg in ['traverse','baseline']:
for (N, S) in zip(network_sizes, num_samples):
for samp in range(S):
print('%s, alg %s, N %d,samp %d'%(test_data_id,alg,N,samp))
npz = np.load('results/%s_%s_N_%d_s_%d.npz'%(alg,test_data_id,N,samp))
W = npz['W']
fxV = npz['fxV']
fxV, converged = rfx.refine_fxpts_capped(W, fxV)
margin = rfx.estimate_forward_error(W, fxV)
f = np.tanh(W.dot(fxV))-fxV
re = np.fabs(f/margin)
re_fx, re_un = re[:,converged].max(axis=0), re[:,~converged].max(axis=0)
re_fx = re_fx[re_fx > 0]
f_fx, f_un = np.fabs(f[:,converged]).max(axis=0), np.fabs(f[:,~converged]).max(axis=0)
f_fx = f_fx[f_fx > 0]
re_npz = {}
re_npz['f_fx'] = f_fx
re_npz['f_un'] = f_un
re_npz['re_fx'] = re_fx
re_npz['re_un'] = re_un
fe.save_npz_file('results/%s_re_%s_N_%d_s_%d.npz'%(alg,test_data_id,N,samp), **re_npz)
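# Sketch of the quantity saved above (a reading of the code, not an
# authoritative definition): for a candidate fixed point v of x -> tanh(W x),
# the residual is f = tanh(W.dot(v)) - v, and the relative error divides |f|
# coordinate-wise by the forward-error margin from
# rfx.estimate_forward_error(W, v); the value kept per point is the maximum
# over coordinates.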
def show_traverse_re_fig(test_data_ids, Ns, samp_range):
"""
Plot relative errors from points found by fiber traversal.
test_data_ids and Ns should be length-2 lists.
Subplots in the first column will show errors for networks of size Ns[0] from test_data_ids[0].
Similarly the second column draws from Ns[1], test_data_ids[1].
Each network sample within samp_range is shown on a separate row.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/traverse_re_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
m_fx, m_un = npz['re_fx'], npz['re_un']
ax = plt.subplot(len(samp_range),len(Ns),sp)
sp += 1
if m_un.shape[0] > 0: plt.hist(np.log2(m_un),bins=30,log=log,facecolor='k')
plt.hist(np.log2(m_fx),bins=10,log=log,facecolor='w')
lo = 10*(int(np.log2(m_fx).min()/10)-1)
if m_un.shape[0] > 0: hi = 10*(int(np.log2(m_un).max()/10)+1)
else: hi = 0
plt.xticks(range(-10,1,2),['']+['$2^{%d}$'%yl for yl in range(-8,1,2)])
if N == Ns[0]:
plt.ylabel('# of points')
if samp == samp_range[0]:
ax.set_title('N = %d'%N)
if samp == samp_range[-1]:
plt.xlabel('Fiber Relative Error')
plt.show()
def baseline_re_single_analysis(test_data_id, N, samp, cap=10):
"""
Analyze edge cases of relative errors on a single network
Uses the samp^{th} sample network of size N in test data test_data_id.
Relative errors in the range (0, 2^{cap}) are considered edge cases.
Returns the number of edge cases divided by the difference |T-B| - |B-T| as a percent.
T and B are as defined in (Katz and Reggia 2017).
"""
npz = fe.load_npz_file('results/baseline_re_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
res = fe.load_pkl_file('results/TvB_%s_N_%d_s_%d.pkl'%(test_data_id, N, samp))
re_un = npz['re_un']
percent = 100.*(re_un < 2**cap).sum()/np.array(res['T-B']-res['B-T'])
print('N=%d, samp %d: B-T = %d, T-B = %d, %d (%f%%) possibly unique slow RE(B) < 2**%d'%(N, samp, res['B-T'], res['T-B'],(re_un < 2**cap).sum(), percent, cap))
return percent
def baseline_re_batch_analysis(test_data_id, Ns, cap=10):
"""
Runs baseline_re_single_analysis on all networks in test_data_id of size N.
cap is as in baseline_re_single_analysis.
returns numpy.array percents, where
percents[i] is as in baseline_re_single_analysis for the i^{th} sample network.
"""
percents = []
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for (N, S) in zip(network_sizes, num_samples):
if N not in Ns: continue
for samp in range(S):
percents.append(baseline_re_single_analysis(test_data_id,N,samp,cap=cap))
percents = np.array(percents)
print('mean %%: %f%%'%percents.mean())
return percents
def show_baseline_re_fig(test_data_ids, Ns, samp_range):
"""
Plot relative errors from points found by the baseline solver.
test_data_ids and Ns should be length-2 lists.
Subplots in the first column will show errors for networks of size Ns[0] from test_data_ids[0].
Similarly the second column draws from Ns[1], test_data_ids[1].
Each network sample within samp_range is shown on a separate row.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/baseline_re_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
m_fx, m_un = npz['re_fx'], npz['re_un']
ax = plt.subplot(len(samp_range),len(Ns),sp)
sp += 1
if m_un.shape[0] > 0: plt.hist(np.log2(m_un),bins=30,log=log,facecolor='k')
plt.hist(np.log2(m_fx),bins=10,log=log,facecolor='w')
lo, hi = -20,50
plt.xticks(range(lo,hi+1,10),[''] + ['$2^{%d}$'%yl for yl in range(lo+10,hi+1,10)])
if N == Ns[0]:
plt.ylabel('# of points')
if samp == samp_range[0]:
ax.set_title('N = %d'%N)
if samp == samp_range[-1]:
plt.xlabel('Baseline Relative Error')
baseline_re_single_analysis(test_data_id, N, samp)
plt.show()
def get_baseline_rd(test_data_id,N,samp,cap,logfilename=os.devnull):
"""
Compute and save relative distances between pairs of points found by the baseline solver.
Relative distance is defined in (Katz and Reggia 2017).
Computes for the samp^{th} sample network of size N in test_data_id.
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
Only pairs within a random subset of points of size cap are inspected.
logfilename is a file name at which progress updates are written.
"""
logfile = open(logfilename,'w')
logfile.write('Running baseline rd (%s,%d,%d)...\n'%(test_data_id,N,samp))
npz = fe.load_npz_file('results/baseline_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
fxV = npz['fxV_converged']
fxV_unique = npz['fxV_unique']
W = npz['W']
if cap is not None and fxV.shape[1] > cap:
logfile.write('capping...\n')
perm = np.random.permutation(fxV.shape[1])
fxV = fxV[:,perm[:cap]]
in_RR, out_RR = [],[]
for j in range(fxV_unique.shape[1]):
logfile.write('duping %d of %d...\n'%(j,fxV_unique.shape[1]))
dups, RR, R = rfx.identical_fixed_points(W, fxV, fxV_unique[:,[j]])
in_RR.append(RR[dups])
out_RR.append(RR[~dups])
in_RR, out_RR = np.concatenate(in_RR), np.concatenate(out_RR)
npz["in_RR"], npz["out_RR"] = in_RR, out_RR
fe.save_npz_file('results/baseline_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp), **npz)
logfile.write('Done.\n')
logfile.close()
print('Done %s %d %d'%(test_data_id,N,samp))
def pool_get_baseline_rd(args):
"""
Wrapper function passed to multiprocessing.Pool
"""
get_baseline_rd(*args)
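# The pool_* wrappers exist because multiprocessing.Pool.map passes a single
# argument to its worker, so each job's parameters are packed into one tuple
# and unpacked with *args. A minimal standalone sketch of the same pattern
# (hypothetical worker, not part of this module):
#
#     def _worker(job):
#         test_data_id, N, samp, cap, logfilename = job
#         get_baseline_rd(test_data_id, N, samp, cap, logfilename)
#     # mp.Pool(processes=2).map(_worker, [('mytest', 10, 0, 20000, os.devnull)])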
def run_baseline_rd(test_data_id, Ns, num_procs):
"""
Run get_baseline_rd on all networks in test_data_id whose size is in the list Ns.
Multiprocessing is used to run on multiple networks in parallel.
num_procs is the number of processors to use.
"""
cpu_count = mp.cpu_count()
print('%d cpus, using %d'%(cpu_count, num_procs))
pool_args = []
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for (N, S) in zip(network_sizes, num_samples):
if N not in Ns: continue
cap = 20000
for s in range(S):
logfilename = 'logs/baseline_rd_%s_N_%d_s_%d.log'%(test_data_id,N,s)
pool_args.append((test_data_id,N,s,cap,logfilename))
start_time = time.time()
test_fun = pool_get_baseline_rd
if num_procs < 1: # don't multiprocess
for args in pool_args: test_fun(args)
else:
pool = mp.Pool(processes=num_procs)
pool.map(test_fun, pool_args)
pool.close()
pool.join()
print('total time: %f'%(time.time()-start_time))
def get_traverse_rd(test_data_id,N,samp,cap,logfilename=os.devnull):
"""
Compute and save relative distances between pairs of points found by fiber traversal.
Relative distance is defined in (Katz and Reggia 2017).
Computes for the samp^{th} sample network of size N in test_data_id.
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
Only pairs within a random subset of points of size cap are inspected.
logfilename is a file name at which progress updates are written.
"""
logfile = open(logfilename,'w')
logfile.write('Running traverse rd (%s,%d,%d)...\n'%(test_data_id,N,samp))
npz = fe.load_npz_file('results/traverse_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
fxV = npz['fxV_converged']
fxV_unique = npz['fxV_unique']
W = npz['W']
if cap is not None and fxV.shape[1] > cap:
logfile.write('capping...\n')
perm = np.random.permutation(fxV.shape[1])
fxV = fxV[:,perm[:cap]]
in_RR, out_RR = [],[]
for j in range(fxV_unique.shape[1]):
logfile.write('duping %d of %d...\n'%(j,fxV_unique.shape[1]))
dups, RR, R = rfx.identical_fixed_points(W, fxV, fxV_unique[:,[j]])
in_RR.append(RR[dups])
out_RR.append(RR[~dups])
in_RR, out_RR = np.concatenate(in_RR), np.concatenate(out_RR)
npz["in_RR"], npz["out_RR"] = in_RR, out_RR
fe.save_npz_file('results/traverse_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp), **npz)
logfile.write('Done.\n')
logfile.close()
print('Done %s %d %d'%(test_data_id,N,samp))
def pool_get_traverse_rd(args):
"""
Wrapper function passed to multiprocessing.Pool
"""
get_traverse_rd(*args)
def run_traverse_rd(test_data_id, Ns, num_procs):
"""
Run get_traverse_rd on all networks in test_data_id whose size is in the list Ns.
Multiprocessing is used to run on multiple networks in parallel.
num_procs is the number of processors to use.
"""
cpu_count = mp.cpu_count()
print('%d cpus, using %d'%(cpu_count, num_procs))
pool_args = []
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for (N,S) in zip(network_sizes, num_samples):
if N not in Ns: continue
cap = 20000
for s in range(S):
logfilename = 'logs/traverse_rd_%s_N_%d_s_%d.log'%(test_data_id,N,s)
pool_args.append((test_data_id,N,s,cap,logfilename))
start_time = time.time()
test_fun = pool_get_traverse_rd
if num_procs < 1: # don't multiprocess
for args in pool_args: test_fun(args)
else:
pool = mp.Pool(processes=num_procs)
pool.map(test_fun, pool_args)
pool.close()
pool.join()
print('total time: %f'%(time.time()-start_time))
def get_simple_rd(test_data_id,N,samp,cap,logfilename=os.devnull):
"""
Compute and save distances between pairs of points found by both solvers,
using a simple uniqueness test: max absolute coordinate-wise difference < 2**-32.
Computes for the samp^{th} sample network of size N in test_data_id.
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
Only pairs within a random subset of points of size cap are inspected.
Saves the pair-wise distance distribution in a histogram with one bucket per integer power of 2.
logfilename is a file name at which progress updates are written.
"""
logfile = open(logfilename,'w')
rfx.hardwrite(logfile,'Running simple rd (%s,%d,%d)...\n'%(test_data_id,N,samp))
buckets = {}
bins = np.arange(-1025,3)
for method_key in ['traverse','baseline']:
npz = fe.load_npz_file('results/%s_%s_N_%d_s_%d.npz'%(method_key,test_data_id,N,samp))
fxV = npz['fxV_converged']
buckets[method_key] = np.zeros(len(bins)-1)
if cap is not None and fxV.shape[1] > cap:
rfx.hardwrite(logfile,'capping...\n')
perm = np.random.permutation(fxV.shape[1])
fxV = fxV[:,perm[:cap]]
for j in range(fxV.shape[1]):
rfx.hardwrite(logfile,'disting %d of %d...\n'%(j,fxV.shape[1]))
dists = np.fabs(fxV-fxV[:,[j]]).max(axis=0)
dists[dists == 0] = 2.0**bins[0]
logdists = np.log2(dists)
logdists[logdists < bins[0]] = bins[0]
logdists[logdists > bins[-1]] = bins[-1]
hist,_ = np.histogram(logdists,bins=bins)
buckets[method_key] += hist
npz = {'bins':bins,'traverse_buckets':buckets['traverse'],'baseline_buckets':buckets['baseline']}
fe.save_npz_file('results/simple_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp), **npz)
rfx.hardwrite(logfile,'Done.\n')
logfile.close()
print('Done %s %d %d'%(test_data_id,N,samp))
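# Note on the binning above (a reading of the code): bins spans integer
# exponents -1025..2, log2 distances are clamped into that range, and
# exact-zero distances are assigned to the smallest bin, so each bucket counts
# pairs whose max coordinate-wise difference falls roughly in [2**k, 2**(k+1)).
# Hypothetical usage:
#
#     get_simple_rd('mytest', N=10, samp=0, cap=1000)
#     # -> writes results/simple_rd_mytest_N_10_s_0.npz containing
#     #    'bins', 'traverse_buckets', 'baseline_buckets'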
def pool_get_simple_rd(args):
"""
Wrapper function passed to multiprocessing.Pool
"""
get_simple_rd(*args)
def run_simple_rd(test_data_id, Ns, num_procs):
"""
Run get_simple_rd on all networks in test_data_id whose size is in the list Ns.
Multiprocessing is used to run on multiple networks in parallel.
num_procs is the number of processors to use.
"""
cpu_count = mp.cpu_count()
print('%d cpus, using %d'%(cpu_count, num_procs))
pool_args = []
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for (N,S) in zip(network_sizes, num_samples):
if N not in Ns: continue
cap = 1000
for s in range(S):
logfilename = 'logs/simple_rd_%s_N_%d_s_%d.log'%(test_data_id,N,s)
pool_args.append((test_data_id,N,s,cap,logfilename))
start_time = time.time()
test_fun = pool_get_simple_rd
if num_procs < 1: # don't multiprocess
for args in pool_args: test_fun(args)
else:
pool = mp.Pool(processes=num_procs)
pool.map(test_fun, pool_args)
pool.close()
pool.join()
print('total time: %f'%(time.time()-start_time))
def show_traverse_rd_fig(test_data_ids, Ns, samp_range):
"""
Plot relative distances from points found by fiber traversal.
test_ids, Ns, and samp_range should be as in show_traverse_re_fig.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/traverse_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
in_rr, out_rr = npz['in_RR'], npz['out_RR']
if (in_rr > 0).any(): in_rr[in_rr == 0] = in_rr[in_rr > 0].min()
else: in_rr[in_rr == 0] = 2**(-30)
ax = plt.subplot(len(samp_range),len(Ns),sp)
sp += 1
if out_rr.shape[0] > 0: plt.hist(np.log2(out_rr),bins=30,log=log,facecolor='k')
plt.hist(np.log2(in_rr),bins=10,log=log,facecolor='w')
if N == Ns[0]:
plt.ylabel('# of pairs')
if samp == samp_range[0]:
ax.set_title('N = %d'%N)
if samp == samp_range[-1]:
plt.xlabel('Fiber Relative Distance')
plt.xlim([-30,50])
plt.xticks(range(-30,51,10),['']+['$2^{%d}$'%xl for xl in range(-20,51,10)])
plt.show()
def show_baseline_rd_fig(test_data_ids, Ns, samp_range):
"""
Plot relative distances from points found by the baseline solver.
test_ids, Ns, and samp_range should be as in show_baseline_re_fig.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/baseline_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
in_rr, out_rr = npz['in_RR'], npz['out_RR']
if (in_rr > 0).any(): in_rr[in_rr == 0] = in_rr[in_rr > 0].min()
else: in_rr[in_rr == 0] = 2**(-30)
ax = plt.subplot(len(samp_range),len(Ns),sp)
sp += 1
if np.isinf(out_rr).any():
if np.isinf(out_rr).all(): out_rr[:] = 4*in_rr.max()
else: out_rr[np.isinf(out_rr)] = 4*out_rr[~np.isinf(out_rr)].max()
print('out_rr:')
print(out_rr.shape)
print((out_rr==0).sum())
print(np.isinf(in_rr).sum())
print(np.isinf(out_rr).sum())
print(np.isnan(out_rr).sum())
if out_rr.shape[0] > 0: plt.hist(np.log2(out_rr),bins=30,log=log,facecolor='k')
# if out_rr.shape[0] > 0: plt.hist(out_rr,bins=30,facecolor='k')
plt.hist(np.log2(in_rr),bins=10,log=log,facecolor='w')
# plt.hist(in_rr,bins=10,facecolor='w')
if N == Ns[0]:
plt.ylabel('# of pairs')
if samp == samp_range[0]:
ax.set_title('N = %d'%N)
if samp == samp_range[-1]:
plt.xlabel('Baseline Relative Distance')
plt.xlim([-30,50])
plt.xticks(range(-30,51,10),['']+['$2^{%d}$'%xl for xl in range(-20,51,10)])
plt.show()
def show_simple_rd_all_fig(test_data_ids, Ns, samp_range):
"""
Plot relative distances from points found by fiber traversal or baseline.
test_ids, Ns, and samp_range should be as in show_traverse_re_fig.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42
buckets = None
bins = None
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/simple_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
if buckets is None:
buckets = np.zeros(npz['traverse_buckets'].shape)
bins = npz['bins']
buckets += npz['traverse_buckets']
buckets += npz['baseline_buckets']
plt.figure(figsize=(8,2.4))
# plt.hist(buckets,bins=bins,log=log)
if log:
buckets[buckets > 0] = np.log2(buckets[buckets > 0])
plt.bar(left=bins[:-1],height=buckets,width=bins[1:]-bins[:-1],facecolor='none')
plt.ylabel('# of pairs')
plt.xlabel('$max_i|v_i^{(1)}-v_i^{(2)}|$') #'Max Coordinate-wise Distance')
xmin_idx = int(((bins[:-1] > -1000) & (buckets > 0)).argmax())
xstep = int(np.ceil((bins[-1]-bins[xmin_idx])/10))
plt.xticks(bins[xmin_idx::xstep],['$2^{%d}$'%xl for xl in bins[xmin_idx::xstep]])
plt.xlim([bins[xmin_idx]-xstep,bins[-1]+xstep])
if log:
ymax = np.ceil(buckets.max())+1
ystep = np.ceil(ymax/5)
plt.yticks(np.arange(0,ymax+ystep,ystep),['$2^{%d}$'%yl for yl in np.arange(0,ymax+ystep,ystep)])
plt.ylim([0,ymax+1])
plt.tight_layout()
plt.show()
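# Hypothetical plotting calls for the figures defined above (assumes the
# corresponding results/*_rd_*.npz files were produced by the run_* drivers):
#
#     show_traverse_rd_fig(['mytest', 'mytest'], [10, 20], range(3))
#     show_baseline_rd_fig(['mytest', 'mytest'], [10, 20], range(3))
#     show_simple_rd_all_fig(['mytest', 'mytest'], [10, 20], range(3))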
|
run_baseline_rd
|
Run get_baseline_rd on all networks in test_data_id whose size is in the list Ns.
Multiprocessing is used to run on multiple networks in parallel.
num_procs is the number of processors to use.
|
"""
Methods for assessing treatment of finite-precision issues
"""
import os
import sys
import time
import multiprocessing as mp
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.markers as mrk
import plotter as ptr
import rnn_fxpts as rfx
import fxpt_experiments as fe
import pickle as pkl
def get_relative_errors(test_data_id):
"""
Compute and save the relative errors of every point found on every network in a testing set.
Relative error is defined in (Katz and Reggia 2017).
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
"""
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for alg in ['traverse','baseline']:
for (N, S) in zip(network_sizes, num_samples):
for samp in range(S):
print('%s, alg %s, N %d,samp %d'%(test_data_id,alg,N,samp))
npz = np.load('results/%s_%s_N_%d_s_%d.npz'%(alg,test_data_id,N,samp))
W = npz['W']
fxV = npz['fxV']
fxV, converged = rfx.refine_fxpts_capped(W, fxV)
margin = rfx.estimate_forward_error(W, fxV)
f = np.tanh(W.dot(fxV))-fxV
re = np.fabs(f/margin)
re_fx, re_un = re[:,converged].max(axis=0), re[:,~converged].max(axis=0)
re_fx = re_fx[re_fx > 0]
f_fx, f_un = np.fabs(f[:,converged]).max(axis=0), np.fabs(f[:,~converged]).max(axis=0)
f_fx = f_fx[f_fx > 0]
re_npz = {}
re_npz['f_fx'] = f_fx
re_npz['f_un'] = f_un
re_npz['re_fx'] = re_fx
re_npz['re_un'] = re_un
fe.save_npz_file('results/%s_re_%s_N_%d_s_%d.npz'%(alg,test_data_id,N,samp), **re_npz)
def show_traverse_re_fig(test_data_ids, Ns, samp_range):
"""
Plot relative errors from points found by fiber traversal.
test_data_ids and Ns should be length-2 lists.
Subplots in the first column will show errors for networks of size Ns[0] from test_data_ids[0].
Similarly the second column draws from Ns[1], test_data_ids[1].
Each network sample within samp_range is shown on a separate row.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/traverse_re_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
m_fx, m_un = npz['re_fx'], npz['re_un']
ax = plt.subplot(len(samp_range),len(Ns),sp)
sp += 1
if m_un.shape[0] > 0: plt.hist(np.log2(m_un),bins=30,log=log,facecolor='k')
plt.hist(np.log2(m_fx),bins=10,log=log,facecolor='w')
lo = 10*(int(np.log2(m_fx).min()/10)-1)
if m_un.shape[0] > 0: hi = 10*(int(np.log2(m_un).max()/10)+1)
else: hi = 0
plt.xticks(range(-10,1,2),['']+['$2^{%d}$'%yl for yl in range(-8,1,2)])
if N == Ns[0]:
plt.ylabel('# of points')
if samp == samp_range[0]:
ax.set_title('N = %d'%N)
if samp == samp_range[-1]:
plt.xlabel('Fiber Relative Error')
plt.show()
def baseline_re_single_analysis(test_data_id, N, samp, cap=10):
"""
Analyze edge cases of relative errors on a single network
Uses the samp^{th} sample network of size N in test data test_data_id.
Relative errors in the range (0, 2^{cap}) are considered edge cases.
Returns the number of edge cases divided by the difference |T-B| - |B-T| as a percent.
T and B are as defined in (Katz and Reggia 2017).
"""
npz = fe.load_npz_file('results/baseline_re_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
res = fe.load_pkl_file('results/TvB_%s_N_%d_s_%d.pkl'%(test_data_id, N, samp))
re_un = npz['re_un']
percent = 100.*(re_un < 2**cap).sum()/np.array(res['T-B']-res['B-T'])
print('N=%d, samp %d: B-T = %d, T-B = %d, %d (%f%%) possibly unique slow RE(B) < 2**%d'%(N, samp, res['B-T'], res['T-B'],(re_un < 2**cap).sum(), percent, cap))
return percent
def baseline_re_batch_analysis(test_data_id, Ns, cap=10):
"""
Runs baseline_re_single_analysis on all networks in test_data_id of size N.
cap is as in baseline_re_single_analysis.
returns numpy.array percents, where
percents[i] is as in baseline_re_single_analysis for the i^{th} sample network.
"""
percents = []
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for (N, S) in zip(network_sizes, num_samples):
if N not in Ns: continue
for samp in range(S):
percents.append(baseline_re_single_analysis(test_data_id,N,samp,cap=cap))
percents = np.array(percents)
print('mean %%: %f%%'%percents.mean())
return percents
def show_baseline_re_fig(test_data_ids, Ns, samp_range):
"""
Plot relative errors from points found by the baseline solver.
test_data_ids and Ns should be length-2 lists.
Subplots in the first column will show errors for networks of size Ns[0] from test_data_ids[0].
Similarly the second column draws from Ns[1], test_data_ids[1].
Each network sample within samp_range is shown on a separate row.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/baseline_re_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
m_fx, m_un = npz['re_fx'], npz['re_un']
ax = plt.subplot(len(samp_range),len(Ns),sp)
sp += 1
if m_un.shape[0] > 0: plt.hist(np.log2(m_un),bins=30,log=log,facecolor='k')
plt.hist(np.log2(m_fx),bins=10,log=log,facecolor='w')
lo, hi = -20,50
plt.xticks(range(lo,hi+1,10),[''] + ['$2^{%d}$'%yl for yl in range(lo+10,hi+1,10)])
if N == Ns[0]:
plt.ylabel('# of points')
if samp == samp_range[0]:
ax.set_title('N = %d'%N)
if samp == samp_range[-1]:
plt.xlabel('Baseline Relative Error')
baseline_re_single_analysis(test_data_id, N, samp)
plt.show()
def get_baseline_rd(test_data_id,N,samp,cap,logfilename=os.devnull):
"""
Compute and save relative distances between pairs of points found by the baseline solver.
Relative distance is defined in (Katz and Reggia 2017).
Computes for the samp^{th} sample network of size N in test_data_id.
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
Only pairs within a random subset of points of size cap are inspected.
logfilename is a file name at which progress updates are written.
"""
logfile = open(logfilename,'w')
logfile.write('Running baseline rd (%s,%d,%d)...\n'%(test_data_id,N,samp))
npz = fe.load_npz_file('results/baseline_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
fxV = npz['fxV_converged']
fxV_unique = npz['fxV_unique']
W = npz['W']
if cap is not None and fxV.shape[1] > cap:
logfile.write('capping...\n')
perm = np.random.permutation(fxV.shape[1])
fxV = fxV[:,perm[:cap]]
in_RR, out_RR = [],[]
for j in range(fxV_unique.shape[1]):
logfile.write('duping %d of %d...\n'%(j,fxV_unique.shape[1]))
dups, RR, R = rfx.identical_fixed_points(W, fxV, fxV_unique[:,[j]])
in_RR.append(RR[dups])
out_RR.append(RR[~dups])
in_RR, out_RR = np.concatenate(in_RR), np.concatenate(out_RR)
npz["in_RR"], npz["out_RR"] = in_RR, out_RR
fe.save_npz_file('results/baseline_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp), **npz)
logfile.write('Done.\n')
logfile.close()
print('Done %s %d %d'%(test_data_id,N,samp))
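# Reading of the arrays saved above (an assumption based on the dups mask from
# rfx.identical_fixed_points): in_RR holds relative distances for pairs judged
# to be the same fixed point, out_RR for pairs judged distinct; a clear gap
# between the two distributions is the expected signature of reliable
# duplicate detection.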
def pool_get_baseline_rd(args):
"""
Wrapper function passed to multiprocessing.Pool
"""
get_baseline_rd(*args)
# MASKED: run_baseline_rd function (lines 178-204)
def get_traverse_rd(test_data_id,N,samp,cap,logfilename=os.devnull):
"""
Compute and save relative distances between pairs of points found by fiber traversal.
Relative distance is defined in (Katz and Reggia 2017).
Computes for the samp^{th} sample network of size N in test_data_id.
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
Only pairs within a random subset of points of size cap are inspected.
logfilename is a file name at which progress updates are written.
"""
logfile = open(logfilename,'w')
logfile.write('Running traverse rd (%s,%d,%d)...\n'%(test_data_id,N,samp))
npz = fe.load_npz_file('results/traverse_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
fxV = npz['fxV_converged']
fxV_unique = npz['fxV_unique']
W = npz['W']
if cap is not None and fxV.shape[1] > cap:
logfile.write('capping...\n')
perm = np.random.permutation(fxV.shape[1])
fxV = fxV[:,perm[:cap]]
in_RR, out_RR = [],[]
for j in range(fxV_unique.shape[1]):
logfile.write('duping %d of %d...\n'%(j,fxV_unique.shape[1]))
dups, RR, R = rfx.identical_fixed_points(W, fxV, fxV_unique[:,[j]])
in_RR.append(RR[dups])
out_RR.append(RR[~dups])
in_RR, out_RR = np.concatenate(in_RR), np.concatenate(out_RR)
npz["in_RR"], npz["out_RR"] = in_RR, out_RR
fe.save_npz_file('results/traverse_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp), **npz)
logfile.write('Done.\n')
logfile.close()
print('Done %s %d %d'%(test_data_id,N,samp))
def pool_get_traverse_rd(args):
"""
Wrapper function passed to multiprocessing.Pool
"""
get_traverse_rd(*args)
def run_traverse_rd(test_data_id, Ns, num_procs):
"""
Run get_traverse_rd on all networks in test_data_id whose size is in the list Ns.
Multiprocessing is used to run on multiple networks in parallel.
num_procs is the number of processors to use.
"""
cpu_count = mp.cpu_count()
print('%d cpus, using %d'%(cpu_count, num_procs))
pool_args = []
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for (N,S) in zip(network_sizes, num_samples):
if N not in Ns: continue
cap = 20000
for s in range(S):
logfilename = 'logs/traverse_rd_%s_N_%d_s_%d.log'%(test_data_id,N,s)
pool_args.append((test_data_id,N,s,cap,logfilename))
start_time = time.time()
test_fun = pool_get_traverse_rd
if num_procs < 1: # don't multiprocess
for args in pool_args: test_fun(args)
else:
pool = mp.Pool(processes=num_procs)
pool.map(test_fun, pool_args)
pool.close()
pool.join()
print('total time: %f'%(time.time()-start_time))
def get_simple_rd(test_data_id,N,samp,cap,logfilename=os.devnull):
"""
Compute and save distances between pairs of points found by both solvers,
using a simple uniqueness test: max absolute coordinate-wise difference < 2**-32.
Computes for the samp^{th} sample network of size N in test_data_id.
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
Only pairs within a random subset of points of size cap are inspected.
Saves the pair-wise distance distribution in a histogram with one bucket per integer power of 2.
logfilename is a file name at which progress updates are written.
"""
logfile = open(logfilename,'w')
rfx.hardwrite(logfile,'Running simple rd (%s,%d,%d)...\n'%(test_data_id,N,samp))
buckets = {}
bins = np.arange(-1025,3)
for method_key in ['traverse','baseline']:
npz = fe.load_npz_file('results/%s_%s_N_%d_s_%d.npz'%(method_key,test_data_id,N,samp))
fxV = npz['fxV_converged']
buckets[method_key] = np.zeros(len(bins)-1)
if cap is not None and fxV.shape[1] > cap:
rfx.hardwrite(logfile,'capping...\n')
perm = np.random.permutation(fxV.shape[1])
fxV = fxV[:,perm[:cap]]
for j in range(fxV.shape[1]):
rfx.hardwrite(logfile,'disting %d of %d...\n'%(j,fxV.shape[1]))
dists = np.fabs(fxV-fxV[:,[j]]).max(axis=0)
dists[dists == 0] = 2.0**bins[0]
logdists = np.log2(dists)
logdists[logdists < bins[0]] = bins[0]
logdists[logdists > bins[-1]] = bins[-1]
hist,_ = np.histogram(logdists,bins=bins)
buckets[method_key] += hist
npz = {'bins':bins,'traverse_buckets':buckets['traverse'],'baseline_buckets':buckets['baseline']}
fe.save_npz_file('results/simple_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp), **npz)
rfx.hardwrite(logfile,'Done.\n')
logfile.close()
print('Done %s %d %d'%(test_data_id,N,samp))
def pool_get_simple_rd(args):
"""
Wrapper function passed to multiprocessing.Pool
"""
get_simple_rd(*args)
def run_simple_rd(test_data_id, Ns, num_procs):
"""
Run get_simple_rd on all networks in test_data_id whose size is in the list Ns.
Multiprocessing is used to run on multiple networks in parallel.
num_procs is the number of processors to use.
"""
cpu_count = mp.cpu_count()
print('%d cpus, using %d'%(cpu_count, num_procs))
pool_args = []
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for (N,S) in zip(network_sizes, num_samples):
if N not in Ns: continue
cap = 1000
for s in range(S):
logfilename = 'logs/simple_rd_%s_N_%d_s_%d.log'%(test_data_id,N,s)
pool_args.append((test_data_id,N,s,cap,logfilename))
start_time = time.time()
test_fun = pool_get_simple_rd
if num_procs < 1: # don't multiprocess
for args in pool_args: test_fun(args)
else:
pool = mp.Pool(processes=num_procs)
pool.map(test_fun, pool_args)
pool.close()
pool.join()
print('total time: %f'%(time.time()-start_time))
def show_traverse_rd_fig(test_data_ids, Ns, samp_range):
"""
Plot relative distances from points found by fiber traversal.
test_ids, Ns, and samp_range should be as in show_traverse_re_fig.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/traverse_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
in_rr, out_rr = npz['in_RR'], npz['out_RR']
if (in_rr > 0).any(): in_rr[in_rr == 0] = in_rr[in_rr > 0].min()
else: in_rr[in_rr == 0] = 2**(-30)
ax = plt.subplot(len(samp_range),len(Ns),sp)
sp += 1
if out_rr.shape[0] > 0: plt.hist(np.log2(out_rr),bins=30,log=log,facecolor='k')
plt.hist(np.log2(in_rr),bins=10,log=log,facecolor='w')
if N == Ns[0]:
plt.ylabel('# of pairs')
if samp == samp_range[0]:
ax.set_title('N = %d'%N)
if samp == samp_range[-1]:
plt.xlabel('Fiber Relative Distance')
plt.xlim([-30,50])
plt.xticks(range(-30,51,10),['']+['$2^{%d}$'%xl for xl in range(-20,51,10)])
plt.show()
def show_baseline_rd_fig(test_data_ids, Ns, samp_range):
"""
Plot relative distances from points found by the baseline solver.
test_ids, Ns, and samp_range should be as in show_baseline_re_fig.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/baseline_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
in_rr, out_rr = npz['in_RR'], npz['out_RR']
if (in_rr > 0).any(): in_rr[in_rr == 0] = in_rr[in_rr > 0].min()
else: in_rr[in_rr == 0] = 2**(-30)
ax = plt.subplot(len(samp_range),len(Ns),sp)
sp += 1
if np.isinf(out_rr).any():
if np.isinf(out_rr).all(): out_rr[:] = 4*in_rr.max()
else: out_rr[np.isinf(out_rr)] = 4*out_rr[~np.isinf(out_rr)].max()
print('out_rr:')
print(out_rr.shape)
print((out_rr==0).sum())
print(np.isinf(in_rr).sum())
print(np.isinf(out_rr).sum())
print(np.isnan(out_rr).sum())
if out_rr.shape[0] > 0: plt.hist(np.log2(out_rr),bins=30,log=log,facecolor='k')
# if out_rr.shape[0] > 0: plt.hist(out_rr,bins=30,facecolor='k')
plt.hist(np.log2(in_rr),bins=10,log=log,facecolor='w')
# plt.hist(in_rr,bins=10,facecolor='w')
if N == Ns[0]:
plt.ylabel('# of pairs')
if samp == samp_range[0]:
ax.set_title('N = %d'%N)
if samp == samp_range[-1]:
plt.xlabel('Baseline Relative Distance')
plt.xlim([-30,50])
plt.xticks(range(-30,51,10),['']+['$2^{%d}$'%xl for xl in range(-20,51,10)])
plt.show()
def show_simple_rd_all_fig(test_data_ids, Ns, samp_range):
"""
Plot relative distances from points found by fiber traversal or baseline.
test_ids, Ns, and samp_range should be as in show_traverse_re_fig.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42
buckets = None
bins = None
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/simple_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
if buckets is None:
buckets = np.zeros(npz['traverse_buckets'].shape)
bins = npz['bins']
buckets += npz['traverse_buckets']
buckets += npz['baseline_buckets']
plt.figure(figsize=(8,2.4))
# plt.hist(buckets,bins=bins,log=log)
if log:
buckets[buckets > 0] = np.log2(buckets[buckets > 0])
plt.bar(left=bins[:-1],height=buckets,width=bins[1:]-bins[:-1],facecolor='none')
plt.ylabel('# of pairs')
plt.xlabel('$max_i|v_i^{(1)}-v_i^{(2)}|$') #'Max Coordinate-wise Distance')
xmin_idx = int(((bins[:-1] > -1000) & (buckets > 0)).argmax())
xstep = int(np.ceil((bins[-1]-bins[xmin_idx])/10))
plt.xticks(bins[xmin_idx::xstep],['$2^{%d}$'%xl for xl in bins[xmin_idx::xstep]])
plt.xlim([bins[xmin_idx]-xstep,bins[-1]+xstep])
if log:
ymax = np.ceil(buckets.max())+1
ystep = np.ceil(ymax/5)
plt.yticks(np.arange(0,ymax+ystep,ystep),['$2^{%d}$'%yl for yl in np.arange(0,ymax+ystep,ystep)])
plt.ylim([0,ymax+1])
plt.tight_layout()
plt.show()
|
def run_baseline_rd(test_data_id, Ns, num_procs):
"""
Run get_baseline_rd on all networks in test_data_id whose size is in the list Ns.
Multiprocessing is used to run on multiple networks in parallel.
num_procs is the number of processors to use.
"""
cpu_count = mp.cpu_count()
print('%d cpus, using %d'%(cpu_count, num_procs))
pool_args = []
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for (N, S) in zip(network_sizes, num_samples):
if N not in Ns: continue
cap = 20000
for s in range(S):
logfilename = 'logs/baseline_rd_%s_N_%d_s_%d.log'%(test_data_id,N,s)
pool_args.append((test_data_id,N,s,cap,logfilename))
start_time = time.time()
test_fun = pool_get_baseline_rd
if num_procs < 1: # don't multiprocess
for args in pool_args: test_fun(args)
else:
pool = mp.Pool(processes=num_procs)
pool.map(test_fun, pool_args)
pool.close()
pool.join()
print('total time: %f'%(time.time()-start_time))
| 178 | 204 |
"""
Methods for assessing treatment of finite-precision issues
"""
import os
import sys
import time
import multiprocessing as mp
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.markers as mrk
import plotter as ptr
import rnn_fxpts as rfx
import fxpt_experiments as fe
import pickle as pkl
def get_relative_errors(test_data_id):
"""
Compute and save the relative errors of every point found on every network in a testing set.
Relative error is defined in (Katz and Reggia 2017).
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
"""
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for alg in ['traverse','baseline']:
for (N, S) in zip(network_sizes, num_samples):
for samp in range(S):
print('%s, alg %s, N %d,samp %d'%(test_data_id,alg,N,samp))
npz = np.load('results/%s_%s_N_%d_s_%d.npz'%(alg,test_data_id,N,samp))
W = npz['W']
fxV = npz['fxV']
fxV, converged = rfx.refine_fxpts_capped(W, fxV)
margin = rfx.estimate_forward_error(W, fxV)
f = np.tanh(W.dot(fxV))-fxV
re = np.fabs(f/margin)
re_fx, re_un = re[:,converged].max(axis=0), re[:,~converged].max(axis=0)
re_fx = re_fx[re_fx > 0]
f_fx, f_un = np.fabs(f[:,converged]).max(axis=0), np.fabs(f[:,~converged]).max(axis=0)
f_fx = f_fx[f_fx > 0]
re_npz = {}
re_npz['f_fx'] = f_fx
re_npz['f_un'] = f_un
re_npz['re_fx'] = re_fx
re_npz['re_un'] = re_un
fe.save_npz_file('results/%s_re_%s_N_%d_s_%d.npz'%(alg,test_data_id,N,samp), **re_npz)
def show_traverse_re_fig(test_data_ids, Ns, samp_range):
"""
Plot relative errors from points found by fiber traversal.
test_data_ids and Ns should be length-2 lists.
Subplots in the first column will show errors for networks of size Ns[0] from test_data_ids[0].
Similarly the second column draws from Ns[1], test_data_ids[1].
Each network sample within samp_range is shown on a separate row.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/traverse_re_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
m_fx, m_un = npz['re_fx'], npz['re_un']
ax = plt.subplot(len(samp_range),len(Ns),sp)
sp += 1
if m_un.shape[0] > 0: plt.hist(np.log2(m_un),bins=30,log=log,facecolor='k')
plt.hist(np.log2(m_fx),bins=10,log=log,facecolor='w')
lo = 10*(int(np.log2(m_fx).min()/10)-1)
if m_un.shape[0] > 0: hi = 10*(int(np.log2(m_un).max()/10)+1)
else: hi = 0
plt.xticks(range(-10,1,2),['']+['$2^{%d}$'%yl for yl in range(-8,1,2)])
if N == Ns[0]:
plt.ylabel('# of points')
if samp == samp_range[0]:
ax.set_title('N = %d'%N)
if samp == samp_range[-1]:
plt.xlabel('Fiber Relative Error')
plt.show()
def baseline_re_single_analysis(test_data_id, N, samp, cap=10):
"""
Analyze edge cases of relative errors on a single network
Uses the samp^{th} sample network of size N in test data test_data_id.
Relative errors in the range (0, 2^{cap}) are considered edge cases.
Returns the number of edge cases divided by the difference |T-B| - |B-T| as a percent.
T and B are as defined in (Katz and Reggia 2017).
"""
npz = fe.load_npz_file('results/baseline_re_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
res = fe.load_pkl_file('results/TvB_%s_N_%d_s_%d.pkl'%(test_data_id, N, samp))
re_un = npz['re_un']
percent = 100.*(re_un < 2**cap).sum()/np.array(res['T-B']-res['B-T'])
print('N=%d, samp %d: B-T = %d, T-B = %d, %d (%f%%) possibly unique slow RE(B) < 2**%d'%(N, samp, res['B-T'], res['T-B'],(re_un < 2**cap).sum(), percent, cap))
return percent
def baseline_re_batch_analysis(test_data_id, Ns, cap=10):
"""
Runs baseline_re_single_analysis on all networks in test_data_id of size N.
cap is as in baseline_re_single_analysis.
returns numpy.array percents, where
percents[i] is as in baseline_re_single_analysis for the i^{th} sample network.
"""
percents = []
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for (N, S) in zip(network_sizes, num_samples):
if N not in Ns: continue
for samp in range(S):
percents.append(baseline_re_single_analysis(test_data_id,N,samp,cap=cap))
percents = np.array(percents)
print('mean %%: %f%%'%percents.mean())
return percents
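# Hypothetical use of the batch analysis above (IDs and sizes are placeholders):
#
#     percents = baseline_re_batch_analysis('mytest', Ns=[10, 20], cap=10)
#     # percents[i] is the per-network percentage printed by
#     # baseline_re_single_analysis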
def show_baseline_re_fig(test_data_ids, Ns, samp_range):
"""
Plot relative errors from points found by the baseline solver.
test_data_ids and Ns should be length-2 lists.
Subplots in the first column will show errors for networks of size Ns[0] from test_data_ids[0].
Similarly the second column draws from Ns[1], test_data_ids[1].
Each network sample within samp_range is shown on a separate row.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/baseline_re_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
m_fx, m_un = npz['re_fx'], npz['re_un']
ax = plt.subplot(len(samp_range),len(Ns),sp)
sp += 1
if m_un.shape[0] > 0: plt.hist(np.log2(m_un),bins=30,log=log,facecolor='k')
plt.hist(np.log2(m_fx),bins=10,log=log,facecolor='w')
lo, hi = -20,50
plt.xticks(range(lo,hi+1,10),[''] + ['$2^{%d}$'%yl for yl in range(lo+10,hi+1,10)])
if N == Ns[0]:
plt.ylabel('# of points')
if samp == samp_range[0]:
ax.set_title('N = %d'%N)
if samp == samp_range[-1]:
plt.xlabel('Baseline Relative Error')
baseline_re_single_analysis(test_data_id, N, samp)
plt.show()
def get_baseline_rd(test_data_id,N,samp,cap,logfilename=os.devnull):
"""
Compute and save relative distances between pairs of points found by the baseline solver.
Relative distance is defined in (Katz and Reggia 2017).
Computes for the samp^{th} sample network of size N in test_data_id.
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
Only pairs within a random subset of points of size cap are inspected.
logfilename is a file name at which progress updates are written.
"""
logfile = open(logfilename,'w')
logfile.write('Running baseline rd (%s,%d,%d)...\n'%(test_data_id,N,samp))
npz = fe.load_npz_file('results/baseline_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
fxV = npz['fxV_converged']
fxV_unique = npz['fxV_unique']
W = npz['W']
if cap is not None and fxV.shape[1] > cap:
logfile.write('capping...\n')
perm = np.random.permutation(fxV.shape[1])
fxV = fxV[:,perm[:cap]]
in_RR, out_RR = [],[]
for j in range(fxV_unique.shape[1]):
logfile.write('duping %d of %d...\n'%(j,fxV_unique.shape[1]))
dups, RR, R = rfx.identical_fixed_points(W, fxV, fxV_unique[:,[j]])
in_RR.append(RR[dups])
out_RR.append(RR[~dups])
in_RR, out_RR = np.concatenate(in_RR), np.concatenate(out_RR)
npz["in_RR"], npz["out_RR"] = in_RR, out_RR
fe.save_npz_file('results/baseline_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp), **npz)
logfile.write('Done.\n')
logfile.close()
print('Done %s %d %d'%(test_data_id,N,samp))
def pool_get_baseline_rd(args):
"""
Wrapper function passed to multiprocessing.Pool
"""
get_baseline_rd(*args)
def run_baseline_rd(test_data_id, Ns, num_procs):
"""
Run get_baseline_rd on all networks in test_data_id whose size is in the list Ns.
Multiprocessing is used to run on multiple networks in parallel.
num_procs is the number of processors to use.
"""
cpu_count = mp.cpu_count()
print('%d cpus, using %d'%(cpu_count, num_procs))
pool_args = []
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for (N, S) in zip(network_sizes, num_samples):
if N not in Ns: continue
cap = 20000
for s in range(S):
logfilename = 'logs/baseline_rd_%s_N_%d_s_%d.log'%(test_data_id,N,s)
pool_args.append((test_data_id,N,s,cap,logfilename))
start_time = time.time()
test_fun = pool_get_baseline_rd
if num_procs < 1: # don't multiprocess
for args in pool_args: test_fun(args)
else:
pool = mp.Pool(processes=num_procs)
pool.map(test_fun, pool_args)
pool.close()
pool.join()
print('total time: %f'%(time.time()-start_time))
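# Hypothetical invocation (test_data_id and sizes are placeholders; assumes the
# results/baseline_*.npz files, the <test_data_id>.npz test data, and a logs/
# directory already exist):
#
#     run_baseline_rd('mytest', Ns=[10, 20], num_procs=4)
#     run_baseline_rd('mytest', Ns=[10], num_procs=0)   # num_procs < 1 runs serially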
def get_traverse_rd(test_data_id,N,samp,cap,logfilename=os.devnull):
"""
Compute and save relative distances between pairs of points found by fiber traversal.
Relative distance is defined in (Katz and Reggia 2017).
Computes for the samp^{th} sample network of size N in test_data_id.
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
Only pairs within a random subset of points of size cap are inspected.
logfilename is a file name at which progress updates are written.
"""
logfile = open(logfilename,'w')
logfile.write('Running traverse rd (%s,%d,%d)...\n'%(test_data_id,N,samp))
npz = fe.load_npz_file('results/traverse_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
fxV = npz['fxV_converged']
fxV_unique = npz['fxV_unique']
W = npz['W']
if cap is not None and fxV.shape[1] > cap:
logfile.write('capping...\n')
perm = np.random.permutation(fxV.shape[1])
fxV = fxV[:,perm[:cap]]
in_RR, out_RR = [],[]
for j in range(fxV_unique.shape[1]):
logfile.write('duping %d of %d...\n'%(j,fxV_unique.shape[1]))
dups, RR, R = rfx.identical_fixed_points(W, fxV, fxV_unique[:,[j]])
in_RR.append(RR[dups])
out_RR.append(RR[~dups])
in_RR, out_RR = np.concatenate(in_RR), np.concatenate(out_RR)
npz["in_RR"], npz["out_RR"] = in_RR, out_RR
fe.save_npz_file('results/traverse_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp), **npz)
logfile.write('Done.\n')
logfile.close()
print('Done %s %d %d'%(test_data_id,N,samp))
def pool_get_traverse_rd(args):
"""
Wrapper function passed to multiprocessing.Pool
"""
get_traverse_rd(*args)
def run_traverse_rd(test_data_id, Ns, num_procs):
"""
Run get_traverse_rd on all networks in test_data_id whose size is in the list Ns.
Multiprocessing is used to run on multiple networks in parallel.
num_procs is the number of processors to use.
"""
cpu_count = mp.cpu_count()
print('%d cpus, using %d'%(cpu_count, num_procs))
pool_args = []
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for (N,S) in zip(network_sizes, num_samples):
if N not in Ns: continue
cap = 20000
for s in range(S):
logfilename = 'logs/traverse_rd_%s_N_%d_s_%d.log'%(test_data_id,N,s)
pool_args.append((test_data_id,N,s,cap,logfilename))
start_time = time.time()
test_fun = pool_get_traverse_rd
if num_procs < 1: # don't multiprocess
for args in pool_args: test_fun(args)
else:
pool = mp.Pool(processes=num_procs)
pool.map(test_fun, pool_args)
pool.close()
pool.join()
print('total time: %f'%(time.time()-start_time))
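# Note (a reading of the driver above): cap=20000 bounds how many converged
# points each pairwise-distance job inspects, and num_procs < 1 runs the jobs
# serially in-process rather than through multiprocessing.
# Hypothetical call: run_traverse_rd('mytest', Ns=[10], num_procs=0)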
def get_simple_rd(test_data_id,N,samp,cap,logfilename=os.devnull):
"""
Compute and save distances between pairs of points found by both solvers,
using a simple uniqueness test: max absolute coordinate-wise difference < 2**-32.
Computes for the samp^{th} sample network of size N in test_data_id.
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
Only pairs within a random subset of points of size cap are inspected.
Saves the pair-wise distance distribution in a histogram with one bucket per integer power of 2.
logfilename is a file name at which progress updates are written.
"""
logfile = open(logfilename,'w')
rfx.hardwrite(logfile,'Running simple rd (%s,%d,%d)...\n'%(test_data_id,N,samp))
buckets = {}
bins = np.arange(-1025,3)
for method_key in ['traverse','baseline']:
npz = fe.load_npz_file('results/%s_%s_N_%d_s_%d.npz'%(method_key,test_data_id,N,samp))
fxV = npz['fxV_converged']
buckets[method_key] = np.zeros(len(bins)-1)
if cap is not None and fxV.shape[1] > cap:
rfx.hardwrite(logfile,'capping...\n')
perm = np.random.permutation(fxV.shape[1])
fxV = fxV[:,perm[:cap]]
for j in range(fxV.shape[1]):
rfx.hardwrite(logfile,'disting %d of %d...\n'%(j,fxV.shape[1]))
dists = np.fabs(fxV-fxV[:,[j]]).max(axis=0)
dists[dists == 0] = 2.0**bins[0]
logdists = np.log2(dists)
logdists[logdists < bins[0]] = bins[0]
logdists[logdists > bins[-1]] = bins[-1]
hist,_ = np.histogram(logdists,bins=bins)
buckets[method_key] += hist
npz = {'bins':bins,'traverse_buckets':buckets['traverse'],'baseline_buckets':buckets['baseline']}
fe.save_npz_file('results/simple_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp), **npz)
rfx.hardwrite(logfile,'Done.\n')
logfile.close()
print('Done %s %d %d'%(test_data_id,N,samp))
def pool_get_simple_rd(args):
"""
Wrapper function passed to multiprocessing.Pool
"""
get_simple_rd(*args)
def run_simple_rd(test_data_id, Ns, num_procs):
"""
Run get_simple_rd on all networks in test_data_id whose size is in the list Ns.
Multiprocessing is used to run on multiple networks in parallel.
num_procs is the number of processors to use.
"""
cpu_count = mp.cpu_count()
print('%d cpus, using %d'%(cpu_count, num_procs))
pool_args = []
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for (N,S) in zip(network_sizes, num_samples):
if N not in Ns: continue
cap = 1000
for s in range(S):
logfilename = 'logs/simple_rd_%s_N_%d_s_%d.log'%(test_data_id,N,s)
pool_args.append((test_data_id,N,s,cap,logfilename))
start_time = time.time()
test_fun = pool_get_simple_rd
if num_procs < 1: # don't multiprocess
for args in pool_args: test_fun(args)
else:
pool = mp.Pool(processes=num_procs)
pool.map(test_fun, pool_args)
pool.close()
pool.join()
print('total time: %f'%(time.time()-start_time))
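# Note: this driver uses cap=1000 per network (versus 20000 in run_baseline_rd
# and run_traverse_rd), presumably to bound the all-pairs distance loop in
# get_simple_rd. Hypothetical call chain (placeholder id):
#
#     run_simple_rd('mytest', Ns=[10, 20], num_procs=2)
#     show_simple_rd_all_fig(['mytest', 'mytest'], [10, 20], range(3))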
def show_traverse_rd_fig(test_data_ids, Ns, samp_range):
"""
Plot relative distances from points found by fiber traversal.
test_ids, Ns, and samp_range should be as in show_traverse_re_fig.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/traverse_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
in_rr, out_rr = npz['in_RR'], npz['out_RR']
if (in_rr > 0).any(): in_rr[in_rr == 0] = in_rr[in_rr > 0].min()
else: in_rr[in_rr == 0] = 2**(-30)
ax = plt.subplot(len(samp_range),len(Ns),sp)
sp += 1
if out_rr.shape[0] > 0: plt.hist(np.log2(out_rr),bins=30,log=log,facecolor='k')
plt.hist(np.log2(in_rr),bins=10,log=log,facecolor='w')
if N == Ns[0]:
plt.ylabel('# of pairs')
if samp == samp_range[0]:
ax.set_title('N = %d'%N)
if samp == samp_range[-1]:
plt.xlabel('Fiber Relative Distance')
plt.xlim([-30,50])
plt.xticks(range(-30,51,10),['']+['$2^{%d}$'%xl for xl in range(-20,51,10)])
plt.show()
def show_baseline_rd_fig(test_data_ids, Ns, samp_range):
"""
Plot relative distances from points found by the baseline solver.
test_ids, Ns, and samp_range should be as in show_baseline_re_fig.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/baseline_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
in_rr, out_rr = npz['in_RR'], npz['out_RR']
if (in_rr > 0).any(): in_rr[in_rr == 0] = in_rr[in_rr > 0].min()
else: in_rr[in_rr == 0] = 2**(-30)
ax = plt.subplot(len(samp_range),len(Ns),sp)
sp += 1
if np.isinf(out_rr).any():
if np.isinf(out_rr).all(): out_rr[:] = 4*in_rr.max()
else: out_rr[np.isinf(out_rr)] = 4*out_rr[~np.isinf(out_rr)].max()
print('out_rr:')
print(out_rr.shape)
print((out_rr==0).sum())
print(np.isinf(in_rr).sum())
print(np.isinf(out_rr).sum())
print(np.isnan(out_rr).sum())
if out_rr.shape[0] > 0: plt.hist(np.log2(out_rr),bins=30,log=log,facecolor='k')
# if out_rr.shape[0] > 0: plt.hist(out_rr,bins=30,facecolor='k')
plt.hist(np.log2(in_rr),bins=10,log=log,facecolor='w')
# plt.hist(in_rr,bins=10,facecolor='w')
if N == Ns[0]:
plt.ylabel('# of pairs')
if samp == samp_range[0]:
ax.set_title('N = %d'%N)
if samp == samp_range[-1]:
plt.xlabel('Baseline Relative Distance')
plt.xlim([-30,50])
plt.xticks(range(-30,51,10),['']+['$2^{%d}$'%xl for xl in range(-20,51,10)])
plt.show()
def show_simple_rd_all_fig(test_data_ids, Ns, samp_range):
"""
Plot relative distances from points found by fiber traversal or baseline.
test_ids, Ns, and samp_range should be as in show_traverse_re_fig.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42
buckets = None
bins = None
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/simple_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
if buckets is None:
buckets = np.zeros(npz['traverse_buckets'].shape)
bins = npz['bins']
buckets += npz['traverse_buckets']
buckets += npz['baseline_buckets']
plt.figure(figsize=(8,2.4))
# plt.hist(buckets,bins=bins,log=log)
if log:
buckets[buckets > 0] = np.log2(buckets[buckets > 0])
plt.bar(left=bins[:-1],height=buckets,width=bins[1:]-bins[:-1],facecolor='none')
plt.ylabel('# of pairs')
plt.xlabel('$max_i|v_i^{(1)}-v_i^{(2)}|$') #'Max Coordinate-wise Distance')
xmin_idx = int(((bins[:-1] > -1000) & (buckets > 0)).argmax())
xstep = int(np.ceil((bins[-1]-bins[xmin_idx])/10))
plt.xticks(bins[xmin_idx::xstep],['$2^{%d}$'%xl for xl in bins[xmin_idx::xstep]])
plt.xlim([bins[xmin_idx]-xstep,bins[-1]+xstep])
if log:
ymax = np.ceil(buckets.max())+1
ystep = np.ceil(ymax/5)
plt.yticks(np.arange(0,ymax+ystep,ystep),['$2^{%d}$'%yl for yl in np.arange(0,ymax+ystep,ystep)])
plt.ylim([0,ymax+1])
plt.tight_layout()
plt.show()
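# Usage sketch for the plotting helpers above (illustrative only): the test data
# id 'mini', the sizes, and the sample range are hypothetical placeholders, and
# the corresponding results/*.npz files are assumed to already exist.
#     show_traverse_re_fig(['mini', 'mini'], [10, 20], range(3))
#     show_traverse_rd_fig(['mini', 'mini'], [10, 20], range(3))
#     show_simple_rd_all_fig(['mini', 'mini'], [10, 20], range(3))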
|
run_traverse_rd
|
Run get_traverse_rd on all networks in test_data_id whose size is in the list Ns.
Multiprocessing is used to run on multiple networks in parallel.
num_procs is the number of processors to use.
|
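A minimal invocation sketch for run_traverse_rd as described above; the test data id 'mini', the network sizes, and the processor count are hypothetical placeholders (a num_procs value below 1 runs the jobs serially):

    run_traverse_rd('mini', Ns=[10, 20], num_procs=4)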
"""
Methods for assessing treatment of finite-precision issues
"""
import os
import sys
import time
import multiprocessing as mp
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.markers as mrk
import plotter as ptr
import rnn_fxpts as rfx
import fxpt_experiments as fe
import pickle as pkl
def get_relative_errors(test_data_id):
"""
Compute and save the relative errors of every point found on every network in a testing set.
Relative error is defined in (Katz and Reggia 2017).
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
"""
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for alg in ['traverse','baseline']:
for (N, S) in zip(network_sizes, num_samples):
for samp in range(S):
print('%s, alg %s, N %d,samp %d'%(test_data_id,alg,N,samp))
npz = np.load('results/%s_%s_N_%d_s_%d.npz'%(alg,test_data_id,N,samp))
W = npz['W']
fxV = npz['fxV']
fxV, converged = rfx.refine_fxpts_capped(W, fxV)
margin = rfx.estimate_forward_error(W, fxV)
f = np.tanh(W.dot(fxV))-fxV
re = np.fabs(f/margin)
re_fx, re_un = re[:,converged].max(axis=0), re[:,~converged].max(axis=0)
re_fx = re_fx[re_fx > 0]
f_fx, f_un = np.fabs(f[:,converged]).max(axis=0), np.fabs(f[:,~converged]).max(axis=0)
f_fx = f_fx[f_fx > 0]
re_npz = {}
re_npz['f_fx'] = f_fx
re_npz['f_un'] = f_un
re_npz['re_fx'] = re_fx
re_npz['re_un'] = re_un
fe.save_npz_file('results/%s_re_%s_N_%d_s_%d.npz'%(alg,test_data_id,N,samp), **re_npz)
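# Sketch of the per-point relative-error computation above, isolated for a single
# (W, fxV) pair; illustrative only, using the same conventions as get_relative_errors.
def _relative_error_sketch(W, fxV):
    margin = rfx.estimate_forward_error(W, fxV)  # coordinate-wise forward-error bound
    f = np.tanh(W.dot(fxV)) - fxV                # fixed-point residual
    return np.fabs(f / margin).max(axis=0)       # worst-case relative error per point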
def show_traverse_re_fig(test_data_ids, Ns, samp_range):
"""
Plot relative errors from points found by fiber traversal.
test_data_ids and Ns should be length-2 lists.
    Subplots in the first column will show errors for networks of size Ns[0] from test_data_ids[0].
Similarly the second column draws from Ns[1], test_data_ids[1].
Each network sample within samp_range is shown on a separate row.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/traverse_re_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
m_fx, m_un = npz['re_fx'], npz['re_un']
ax = plt.subplot(len(samp_range),len(Ns),sp)
sp += 1
if m_un.shape[0] > 0: plt.hist(np.log2(m_un),bins=30,log=log,facecolor='k')
plt.hist(np.log2(m_fx),bins=10,log=log,facecolor='w')
lo = 10*(int(np.log2(m_fx).min()/10)-1)
if m_un.shape[0] > 0: hi = 10*(int(np.log2(m_un).max()/10)+1)
else: hi = 0
plt.xticks(range(-10,1,2),['']+['$2^{%d}$'%yl for yl in range(-8,1,2)])
if N == Ns[0]:
plt.ylabel('# of points')
if samp == samp_range[0]:
ax.set_title('N = %d'%N)
if samp == samp_range[-1]:
plt.xlabel('Fiber Relative Error')
plt.show()
def baseline_re_single_analysis(test_data_id, N, samp, cap=10):
"""
Analyze edge cases of relative errors on a single network
Uses the samp^{th} sample network of size N in test data test_data_id.
Relative errors in the range (0, 2^{cap}) are considered edge cases.
Returns the number of edge cases divided by the difference |T-B| - |B-T| as a percent.
T and B are as defined in (Katz and Reggia 2017).
"""
npz = fe.load_npz_file('results/baseline_re_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
res = fe.load_pkl_file('results/TvB_%s_N_%d_s_%d.pkl'%(test_data_id, N, samp))
re_un = npz['re_un']
percent = 100.*(re_un < 2**cap).sum()/np.array(res['T-B']-res['B-T'])
print('N=%d, samp %d: B-T = %d, T-B = %d, %d (%f%%) possibly unique slow RE(B) < 2**%d'%(N, samp, res['B-T'], res['T-B'],(re_un < 2**cap).sum(), percent, cap))
return percent
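# Worked example of the percentage above (numbers are made up for illustration):
# if 5 possibly-unique slow points have RE(B) < 2**cap, with |T-B| = 30 and
# |B-T| = 10, then percent = 100 * 5 / (30 - 10) = 25.0.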
def baseline_re_batch_analysis(test_data_id, Ns, cap=10):
"""
    Runs baseline_re_single_analysis on all networks in test_data_id whose size is in the list Ns.
cap is as in baseline_re_single_analysis.
returns numpy.array percents, where
percents[i] is as in baseline_re_single_analysis for the i^{th} sample network.
"""
percents = []
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for (N, S) in zip(network_sizes, num_samples):
if N not in Ns: continue
for samp in range(S):
percents.append(baseline_re_single_analysis(test_data_id,N,samp,cap=cap))
percents = np.array(percents)
    print('mean %%: %f%%'%percents.mean())
    return percents
def show_baseline_re_fig(test_data_ids, Ns, samp_range):
"""
Plot relative errors from points found by the baseline solver.
test_data_ids and Ns should be length-2 lists.
    Subplots in the first column will show errors for networks of size Ns[0] from test_data_ids[0].
Similarly the second column draws from Ns[1], test_data_ids[1].
Each network sample within samp_range is shown on a separate row.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/baseline_re_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
m_fx, m_un = npz['re_fx'], npz['re_un']
ax = plt.subplot(len(samp_range),len(Ns),sp)
sp += 1
if m_un.shape[0] > 0: plt.hist(np.log2(m_un),bins=30,log=log,facecolor='k')
plt.hist(np.log2(m_fx),bins=10,log=log,facecolor='w')
lo, hi = -20,50
plt.xticks(range(lo,hi+1,10),[''] + ['$2^{%d}$'%yl for yl in range(lo+10,hi+1,10)])
if N == Ns[0]:
plt.ylabel('# of points')
if samp == samp_range[0]:
ax.set_title('N = %d'%N)
if samp == samp_range[-1]:
plt.xlabel('Baseline Relative Error')
baseline_re_single_analysis(test_data_id, N, samp)
plt.show()
def get_baseline_rd(test_data_id,N,samp,cap,logfilename=os.devnull):
"""
Compute and save relative distances between pairs of points found by the baseline solver.
Relative distance is defined in (Katz and Reggia 2017).
Computes for the samp^{th} sample network of size N in test_data_id.
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
Only pairs within a random subset of points of size cap are inspected.
logfilename is a file name at which progress updates are written.
"""
logfile = open(logfilename,'w')
logfile.write('Running baseline rd (%s,%d,%d)...\n'%(test_data_id,N,samp))
npz = fe.load_npz_file('results/baseline_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
fxV = npz['fxV_converged']
fxV_unique = npz['fxV_unique']
W = npz['W']
if cap is not None and fxV.shape[1] > cap:
logfile.write('capping...\n')
perm = np.random.permutation(fxV.shape[1])
fxV = fxV[:,perm[:cap]]
in_RR, out_RR = [],[]
for j in range(fxV_unique.shape[1]):
logfile.write('duping %d of %d...\n'%(j,fxV_unique.shape[1]))
dups, RR, R = rfx.identical_fixed_points(W, fxV, fxV_unique[:,[j]])
in_RR.append(RR[dups])
out_RR.append(RR[~dups])
in_RR, out_RR = np.concatenate(in_RR), np.concatenate(out_RR)
npz["in_RR"], npz["out_RR"] = in_RR, out_RR
fe.save_npz_file('results/baseline_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp), **npz)
logfile.write('Done.\n')
logfile.close()
print('Done %s %d %d'%(test_data_id,N,samp))
def pool_get_baseline_rd(args):
"""
Wrapper function passed to multiprocessing.Pool
"""
get_baseline_rd(*args)
def run_baseline_rd(test_data_id, Ns, num_procs):
"""
Run get_baseline_rd on all networks in test_data_id whose size is in the list Ns.
Multiprocessing is used to run on multiple networks in parallel.
num_procs is the number of processors to use.
"""
cpu_count = mp.cpu_count()
print('%d cpus, using %d'%(cpu_count, num_procs))
pool_args = []
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for (N, S) in zip(network_sizes, num_samples):
if N not in Ns: continue
cap = 20000
for s in range(S):
logfilename = 'logs/baseline_rd_%s_N_%d_s_%d.log'%(test_data_id,N,s)
pool_args.append((test_data_id,N,s,cap,logfilename))
start_time = time.time()
test_fun = pool_get_baseline_rd
if num_procs < 1: # don't multiprocess
for args in pool_args: test_fun(args)
else:
pool = mp.Pool(processes=num_procs)
pool.map(test_fun, pool_args)
pool.close()
pool.join()
print('total time: %f'%(time.time()-start_time))
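# The run_* drivers assume the 'results/' and 'logs/' directories already exist;
# a small guard such as the following (an addition for convenience, not part of
# the original pipeline) avoids write errors on a fresh checkout:
def _ensure_dirs(dirs=('results', 'logs')):
    for d in dirs:
        if not os.path.isdir(d):
            os.makedirs(d)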
def get_traverse_rd(test_data_id,N,samp,cap,logfilename=os.devnull):
"""
    Compute and save relative distances between pairs of points found by the fiber traversal solver.
Relative distance is defined in (Katz and Reggia 2017).
Computes for the samp^{th} sample network of size N in test_data_id.
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
Only pairs within a random subset of points of size cap are inspected.
logfilename is a file name at which progress updates are written.
"""
logfile = open(logfilename,'w')
logfile.write('Running traverse rd (%s,%d,%d)...\n'%(test_data_id,N,samp))
npz = fe.load_npz_file('results/traverse_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
fxV = npz['fxV_converged']
fxV_unique = npz['fxV_unique']
W = npz['W']
if cap is not None and fxV.shape[1] > cap:
logfile.write('capping...\n')
perm = np.random.permutation(fxV.shape[1])
fxV = fxV[:,perm[:cap]]
in_RR, out_RR = [],[]
for j in range(fxV_unique.shape[1]):
logfile.write('duping %d of %d...\n'%(j,fxV_unique.shape[1]))
dups, RR, R = rfx.identical_fixed_points(W, fxV, fxV_unique[:,[j]])
in_RR.append(RR[dups])
out_RR.append(RR[~dups])
in_RR, out_RR = np.concatenate(in_RR), np.concatenate(out_RR)
npz["in_RR"], npz["out_RR"] = in_RR, out_RR
fe.save_npz_file('results/traverse_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp), **npz)
logfile.write('Done.\n')
logfile.close()
print('Done %s %d %d'%(test_data_id,N,samp))
def pool_get_traverse_rd(args):
"""
Wrapper function passed to multiprocessing.Pool
"""
get_traverse_rd(*args)
# MASKED: run_traverse_rd function (lines 244-271)
def get_simple_rd(test_data_id,N,samp,cap,logfilename=os.devnull):
"""
    Uses a simple uniqueness test: two points are considered identical if their max absolute coordinate-wise difference is < 2**-32.
Compute and save distances between pairs of points found by both solvers.
Computes for the samp^{th} sample network of size N in test_data_id.
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
Only pairs within a random subset of points of size cap are inspected.
Saves pair-wise distance distribution in histogram with one bucket per integer power of 2
logfilename is a file name at which progress updates are written.
"""
logfile = open(logfilename,'w')
rfx.hardwrite(logfile,'Running simple rd (%s,%d,%d)...\n'%(test_data_id,N,samp))
buckets = {}
bins = np.arange(-1025,3)
for method_key in ['traverse','baseline']:
npz = fe.load_npz_file('results/%s_%s_N_%d_s_%d.npz'%(method_key,test_data_id,N,samp))
fxV = npz['fxV_converged']
buckets[method_key] = np.zeros(len(bins)-1)
if cap is not None and fxV.shape[1] > cap:
rfx.hardwrite(logfile,'capping...\n')
perm = np.random.permutation(fxV.shape[1])
fxV = fxV[:,perm[:cap]]
for j in range(fxV.shape[1]):
rfx.hardwrite(logfile,'disting %d of %d...\n'%(j,fxV.shape[1]))
dists = np.fabs(fxV-fxV[:,[j]]).max(axis=0)
dists[dists == 0] = 2.0**bins[0]
logdists = np.log2(dists)
logdists[logdists < bins[0]] = bins[0]
logdists[logdists > bins[-1]] = bins[-1]
hist,_ = np.histogram(logdists,bins=bins)
buckets[method_key] += hist
npz = {'bins':bins,'traverse_buckets':buckets['traverse'],'baseline_buckets':buckets['baseline']}
fe.save_npz_file('results/simple_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp), **npz)
rfx.hardwrite(logfile,'Done.\n')
logfile.close()
print('Done %s %d %d'%(test_data_id,N,samp))
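# Sketch of the log2 bucketing used above, applied to a single reference column j
# of a point set fxV; illustrative only, mirroring the loop body in get_simple_rd.
def _bucket_sketch(fxV, j, bins=np.arange(-1025, 3)):
    dists = np.fabs(fxV - fxV[:, [j]]).max(axis=0)  # max coordinate-wise distance
    dists[dists == 0] = 2.0**bins[0]                # clamp exact duplicates to the smallest bin
    logdists = np.clip(np.log2(dists), bins[0], bins[-1])
    hist, _ = np.histogram(logdists, bins=bins)
    return hist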
def pool_get_simple_rd(args):
"""
Wrapper function passed to multiprocessing.Pool
"""
get_simple_rd(*args)
def run_simple_rd(test_data_id, Ns, num_procs):
"""
Run get_simple_rd on all networks in test_data_id whose size is in the list Ns.
Multiprocessing is used to run on multiple networks in parallel.
num_procs is the number of processors to use.
"""
cpu_count = mp.cpu_count()
print('%d cpus, using %d'%(cpu_count, num_procs))
pool_args = []
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for (N,S) in zip(network_sizes, num_samples):
if N not in Ns: continue
cap = 1000
for s in range(S):
logfilename = 'logs/simple_rd_%s_N_%d_s_%d.log'%(test_data_id,N,s)
pool_args.append((test_data_id,N,s,cap,logfilename))
start_time = time.time()
test_fun = pool_get_simple_rd
if num_procs < 1: # don't multiprocess
for args in pool_args: test_fun(args)
else:
pool = mp.Pool(processes=num_procs)
pool.map(test_fun, pool_args)
pool.close()
pool.join()
print('total time: %f'%(time.time()-start_time))
def show_traverse_rd_fig(test_data_ids, Ns, samp_range):
"""
Plot relative distances from points found by fiber traversal.
    test_data_ids, Ns, and samp_range should be as in show_traverse_re_fig.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/traverse_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
in_rr, out_rr = npz['in_RR'], npz['out_RR']
if (in_rr > 0).any(): in_rr[in_rr == 0] = in_rr[in_rr > 0].min()
else: in_rr[in_rr == 0] = 2**(-30)
ax = plt.subplot(len(samp_range),len(Ns),sp)
sp += 1
if out_rr.shape[0] > 0: plt.hist(np.log2(out_rr),bins=30,log=log,facecolor='k')
plt.hist(np.log2(in_rr),bins=10,log=log,facecolor='w')
if N == Ns[0]:
plt.ylabel('# of pairs')
if samp == samp_range[0]:
ax.set_title('N = %d'%N)
if samp == samp_range[-1]:
plt.xlabel('Fiber Relative Distance')
plt.xlim([-30,50])
plt.xticks(range(-30,51,10),['']+['$2^{%d}$'%xl for xl in range(-20,51,10)])
plt.show()
def show_baseline_rd_fig(test_data_ids, Ns, samp_range):
"""
Plot relative distances from points found by the baseline solver.
    test_data_ids, Ns, and samp_range should be as in show_baseline_re_fig.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/baseline_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
in_rr, out_rr = npz['in_RR'], npz['out_RR']
if (in_rr > 0).any(): in_rr[in_rr == 0] = in_rr[in_rr > 0].min()
else: in_rr[in_rr == 0] = 2**(-30)
ax = plt.subplot(len(samp_range),len(Ns),sp)
sp += 1
if np.isinf(out_rr).any():
if np.isinf(out_rr).all(): out_rr[:] = 4*in_rr.max()
else: out_rr[np.isinf(out_rr)] = 4*out_rr[~np.isinf(out_rr)].max()
print('out_rr:')
print(out_rr.shape)
print((out_rr==0).sum())
print(np.isinf(in_rr).sum())
print(np.isinf(out_rr).sum())
print(np.isnan(out_rr).sum())
if out_rr.shape[0] > 0: plt.hist(np.log2(out_rr),bins=30,log=log,facecolor='k')
# if out_rr.shape[0] > 0: plt.hist(out_rr,bins=30,facecolor='k')
plt.hist(np.log2(in_rr),bins=10,log=log,facecolor='w')
# plt.hist(in_rr,bins=10,facecolor='w')
if N == Ns[0]:
plt.ylabel('# of pairs')
if samp == samp_range[0]:
ax.set_title('N = %d'%N)
if samp == samp_range[-1]:
plt.xlabel('Baseline Relative Distance')
plt.xlim([-30,50])
plt.xticks(range(-30,51,10),['']+['$2^{%d}$'%xl for xl in range(-20,51,10)])
plt.show()
def show_simple_rd_all_fig(test_data_ids, Ns, samp_range):
"""
Plot relative distances from points found by fiber traversal or baseline.
    test_data_ids, Ns, and samp_range should be as in show_traverse_re_fig.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42
buckets = None
bins = None
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/simple_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
if buckets is None:
buckets = np.zeros(npz['traverse_buckets'].shape)
bins = npz['bins']
buckets += npz['traverse_buckets']
buckets += npz['baseline_buckets']
plt.figure(figsize=(8,2.4))
# plt.hist(buckets,bins=bins,log=log)
if log:
buckets[buckets > 0] = np.log2(buckets[buckets > 0])
plt.bar(left=bins[:-1],height=buckets,width=bins[1:]-bins[:-1],facecolor='none')
plt.ylabel('# of pairs')
plt.xlabel('$max_i|v_i^{(1)}-v_i^{(2)}|$') #'Max Coordinate-wise Distance')
xmin_idx = int(((bins[:-1] > -1000) & (buckets > 0)).argmax())
xstep = int(np.ceil((bins[-1]-bins[xmin_idx])/10))
plt.xticks(bins[xmin_idx::xstep],['$2^{%d}$'%xl for xl in bins[xmin_idx::xstep]])
plt.xlim([bins[xmin_idx]-xstep,bins[-1]+xstep])
if log:
ymax = np.ceil(buckets.max())+1
ystep = np.ceil(ymax/5)
plt.yticks(np.arange(0,ymax+ystep,ystep),['$2^{%d}$'%yl for yl in np.arange(0,ymax+ystep,ystep)])
plt.ylim([0,ymax+1])
plt.tight_layout()
plt.show()
|
def run_traverse_rd(test_data_id, Ns, num_procs):
"""
Run get_traverse_rd on all networks in test_data_id whose size is in the list Ns.
Multiprocessing is used to run on multiple networks in parallel.
num_procs is the number of processors to use.
"""
cpu_count = mp.cpu_count()
print('%d cpus, using %d'%(cpu_count, num_procs))
pool_args = []
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for (N,S) in zip(network_sizes, num_samples):
if N not in Ns: continue
cap = 20000
for s in range(S):
logfilename = 'logs/traverse_rd_%s_N_%d_s_%d.log'%(test_data_id,N,s)
pool_args.append((test_data_id,N,s,cap,logfilename))
start_time = time.time()
test_fun = pool_get_traverse_rd
if num_procs < 1: # don't multiprocess
for args in pool_args: test_fun(args)
else:
pool = mp.Pool(processes=num_procs)
pool.map(test_fun, pool_args)
pool.close()
pool.join()
print('total time: %f'%(time.time()-start_time))
| 244 | 271 |
"""
Methods for assessing treatment of finite-precision issues
"""
import os
import sys
import time
import multiprocessing as mp
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.markers as mrk
import plotter as ptr
import rnn_fxpts as rfx
import fxpt_experiments as fe
import pickle as pkl
def get_relative_errors(test_data_id):
"""
Compute and save the relative errors of every point found on every network in a testing set.
Relative error is defined in (Katz and Reggia 2017).
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
"""
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for alg in ['traverse','baseline']:
for (N, S) in zip(network_sizes, num_samples):
for samp in range(S):
print('%s, alg %s, N %d,samp %d'%(test_data_id,alg,N,samp))
npz = np.load('results/%s_%s_N_%d_s_%d.npz'%(alg,test_data_id,N,samp))
W = npz['W']
fxV = npz['fxV']
fxV, converged = rfx.refine_fxpts_capped(W, fxV)
margin = rfx.estimate_forward_error(W, fxV)
f = np.tanh(W.dot(fxV))-fxV
re = np.fabs(f/margin)
re_fx, re_un = re[:,converged].max(axis=0), re[:,~converged].max(axis=0)
re_fx = re_fx[re_fx > 0]
f_fx, f_un = np.fabs(f[:,converged]).max(axis=0), np.fabs(f[:,~converged]).max(axis=0)
f_fx = f_fx[f_fx > 0]
re_npz = {}
re_npz['f_fx'] = f_fx
re_npz['f_un'] = f_un
re_npz['re_fx'] = re_fx
re_npz['re_un'] = re_un
fe.save_npz_file('results/%s_re_%s_N_%d_s_%d.npz'%(alg,test_data_id,N,samp), **re_npz)
def show_traverse_re_fig(test_data_ids, Ns, samp_range):
"""
Plot relative errors from points found by fiber traversal.
test_data_ids and Ns should be length-2 lists.
    Subplots in the first column will show errors for networks of size Ns[0] from test_data_ids[0].
Similarly the second column draws from Ns[1], test_data_ids[1].
Each network sample within samp_range is shown on a separate row.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/traverse_re_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
m_fx, m_un = npz['re_fx'], npz['re_un']
ax = plt.subplot(len(samp_range),len(Ns),sp)
sp += 1
if m_un.shape[0] > 0: plt.hist(np.log2(m_un),bins=30,log=log,facecolor='k')
plt.hist(np.log2(m_fx),bins=10,log=log,facecolor='w')
lo = 10*(int(np.log2(m_fx).min()/10)-1)
if m_un.shape[0] > 0: hi = 10*(int(np.log2(m_un).max()/10)+1)
else: hi = 0
plt.xticks(range(-10,1,2),['']+['$2^{%d}$'%yl for yl in range(-8,1,2)])
if N == Ns[0]:
plt.ylabel('# of points')
if samp == samp_range[0]:
ax.set_title('N = %d'%N)
if samp == samp_range[-1]:
plt.xlabel('Fiber Relative Error')
plt.show()
def baseline_re_single_analysis(test_data_id, N, samp, cap=10):
"""
Analyze edge cases of relative errors on a single network
Uses the samp^{th} sample network of size N in test data test_data_id.
Relative errors in the range (0, 2^{cap}) are considered edge cases.
Returns the number of edge cases divided by the difference |T-B| - |B-T| as a percent.
T and B are as defined in (Katz and Reggia 2017).
"""
npz = fe.load_npz_file('results/baseline_re_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
res = fe.load_pkl_file('results/TvB_%s_N_%d_s_%d.pkl'%(test_data_id, N, samp))
re_un = npz['re_un']
percent = 100.*(re_un < 2**cap).sum()/np.array(res['T-B']-res['B-T'])
print('N=%d, samp %d: B-T = %d, T-B = %d, %d (%f%%) possibly unique slow RE(B) < 2**%d'%(N, samp, res['B-T'], res['T-B'],(re_un < 2**cap).sum(), percent, cap))
return percent
def baseline_re_batch_analysis(test_data_id, Ns, cap=10):
"""
    Runs baseline_re_single_analysis on all networks in test_data_id whose size is in the list Ns.
cap is as in baseline_re_single_analysis.
returns numpy.array percents, where
percents[i] is as in baseline_re_single_analysis for the i^{th} sample network.
"""
percents = []
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for (N, S) in zip(network_sizes, num_samples):
if N not in Ns: continue
for samp in range(S):
percents.append(baseline_re_single_analysis(test_data_id,N,samp,cap=cap))
percents = np.array(percents)
    print('mean %%: %f%%'%percents.mean())
    return percents
def show_baseline_re_fig(test_data_ids, Ns, samp_range):
"""
Plot relative errors from points found by the baseline solver.
test_data_ids and Ns should be length-2 lists.
    Subplots in the first column will show errors for networks of size Ns[0] from test_data_ids[0].
Similarly the second column draws from Ns[1], test_data_ids[1].
Each network sample within samp_range is shown on a separate row.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/baseline_re_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
m_fx, m_un = npz['re_fx'], npz['re_un']
ax = plt.subplot(len(samp_range),len(Ns),sp)
sp += 1
if m_un.shape[0] > 0: plt.hist(np.log2(m_un),bins=30,log=log,facecolor='k')
plt.hist(np.log2(m_fx),bins=10,log=log,facecolor='w')
lo, hi = -20,50
plt.xticks(range(lo,hi+1,10),[''] + ['$2^{%d}$'%yl for yl in range(lo+10,hi+1,10)])
if N == Ns[0]:
plt.ylabel('# of points')
if samp == samp_range[0]:
ax.set_title('N = %d'%N)
if samp == samp_range[-1]:
plt.xlabel('Baseline Relative Error')
baseline_re_single_analysis(test_data_id, N, samp)
plt.show()
def get_baseline_rd(test_data_id,N,samp,cap,logfilename=os.devnull):
"""
Compute and save relative distances between pairs of points found by the baseline solver.
Relative distance is defined in (Katz and Reggia 2017).
Computes for the samp^{th} sample network of size N in test_data_id.
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
Only pairs within a random subset of points of size cap are inspected.
logfilename is a file name at which progress updates are written.
"""
logfile = open(logfilename,'w')
logfile.write('Running baseline rd (%s,%d,%d)...\n'%(test_data_id,N,samp))
npz = fe.load_npz_file('results/baseline_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
fxV = npz['fxV_converged']
fxV_unique = npz['fxV_unique']
W = npz['W']
if cap is not None and fxV.shape[1] > cap:
logfile.write('capping...\n')
perm = np.random.permutation(fxV.shape[1])
fxV = fxV[:,perm[:cap]]
in_RR, out_RR = [],[]
for j in range(fxV_unique.shape[1]):
logfile.write('duping %d of %d...\n'%(j,fxV_unique.shape[1]))
dups, RR, R = rfx.identical_fixed_points(W, fxV, fxV_unique[:,[j]])
in_RR.append(RR[dups])
out_RR.append(RR[~dups])
in_RR, out_RR = np.concatenate(in_RR), np.concatenate(out_RR)
npz["in_RR"], npz["out_RR"] = in_RR, out_RR
fe.save_npz_file('results/baseline_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp), **npz)
logfile.write('Done.\n')
logfile.close()
print('Done %s %d %d'%(test_data_id,N,samp))
def pool_get_baseline_rd(args):
"""
Wrapper function passed to multiprocessing.Pool
"""
get_baseline_rd(*args)
def run_baseline_rd(test_data_id, Ns, num_procs):
"""
Run get_baseline_rd on all networks in test_data_id whose size is in the list Ns.
Multiprocessing is used to run on multiple networks in parallel.
num_procs is the number of processors to use.
"""
cpu_count = mp.cpu_count()
print('%d cpus, using %d'%(cpu_count, num_procs))
pool_args = []
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for (N, S) in zip(network_sizes, num_samples):
if N not in Ns: continue
cap = 20000
for s in range(S):
logfilename = 'logs/baseline_rd_%s_N_%d_s_%d.log'%(test_data_id,N,s)
pool_args.append((test_data_id,N,s,cap,logfilename))
start_time = time.time()
test_fun = pool_get_baseline_rd
if num_procs < 1: # don't multiprocess
for args in pool_args: test_fun(args)
else:
pool = mp.Pool(processes=num_procs)
pool.map(test_fun, pool_args)
pool.close()
pool.join()
print('total time: %f'%(time.time()-start_time))
def get_traverse_rd(test_data_id,N,samp,cap,logfilename=os.devnull):
"""
    Compute and save relative distances between pairs of points found by the fiber traversal solver.
Relative distance is defined in (Katz and Reggia 2017).
Computes for the samp^{th} sample network of size N in test_data_id.
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
Only pairs within a random subset of points of size cap are inspected.
logfilename is a file name at which progress updates are written.
"""
logfile = open(logfilename,'w')
logfile.write('Running traverse rd (%s,%d,%d)...\n'%(test_data_id,N,samp))
npz = fe.load_npz_file('results/traverse_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
fxV = npz['fxV_converged']
fxV_unique = npz['fxV_unique']
W = npz['W']
if cap is not None and fxV.shape[1] > cap:
logfile.write('capping...\n')
perm = np.random.permutation(fxV.shape[1])
fxV = fxV[:,perm[:cap]]
in_RR, out_RR = [],[]
for j in range(fxV_unique.shape[1]):
logfile.write('duping %d of %d...\n'%(j,fxV_unique.shape[1]))
dups, RR, R = rfx.identical_fixed_points(W, fxV, fxV_unique[:,[j]])
in_RR.append(RR[dups])
out_RR.append(RR[~dups])
in_RR, out_RR = np.concatenate(in_RR), np.concatenate(out_RR)
npz["in_RR"], npz["out_RR"] = in_RR, out_RR
fe.save_npz_file('results/traverse_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp), **npz)
logfile.write('Done.\n')
logfile.close()
print('Done %s %d %d'%(test_data_id,N,samp))
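# Sketch of the duplicate/relative-distance split performed above: for a single
# reference column of fxV_unique, rfx.identical_fixed_points returns a boolean
# duplicate mask plus the relative distances RR to every candidate point, e.g.:
#     dups, RR, R = rfx.identical_fixed_points(W, fxV, fxV_unique[:, [0]])
#     in_RR, out_RR = RR[dups], RR[~dups]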
def pool_get_traverse_rd(args):
"""
Wrapper function passed to multiprocessing.Pool
"""
get_traverse_rd(*args)
def run_traverse_rd(test_data_id, Ns, num_procs):
"""
Run get_traverse_rd on all networks in test_data_id whose size is in the list Ns.
Multiprocessing is used to run on multiple networks in parallel.
num_procs is the number of processors to use.
"""
cpu_count = mp.cpu_count()
print('%d cpus, using %d'%(cpu_count, num_procs))
pool_args = []
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for (N,S) in zip(network_sizes, num_samples):
if N not in Ns: continue
cap = 20000
for s in range(S):
logfilename = 'logs/traverse_rd_%s_N_%d_s_%d.log'%(test_data_id,N,s)
pool_args.append((test_data_id,N,s,cap,logfilename))
start_time = time.time()
test_fun = pool_get_traverse_rd
if num_procs < 1: # don't multiprocess
for args in pool_args: test_fun(args)
else:
pool = mp.Pool(processes=num_procs)
pool.map(test_fun, pool_args)
pool.close()
pool.join()
print('total time: %f'%(time.time()-start_time))
def get_simple_rd(test_data_id,N,samp,cap,logfilename=os.devnull):
"""
    Uses a simple uniqueness test: two points are considered identical if their max absolute coordinate-wise difference is < 2**-32.
Compute and save distances between pairs of points found by both solvers.
Computes for the samp^{th} sample network of size N in test_data_id.
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
Only pairs within a random subset of points of size cap are inspected.
Saves pair-wise distance distribution in histogram with one bucket per integer power of 2
logfilename is a file name at which progress updates are written.
"""
logfile = open(logfilename,'w')
rfx.hardwrite(logfile,'Running simple rd (%s,%d,%d)...\n'%(test_data_id,N,samp))
buckets = {}
bins = np.arange(-1025,3)
for method_key in ['traverse','baseline']:
npz = fe.load_npz_file('results/%s_%s_N_%d_s_%d.npz'%(method_key,test_data_id,N,samp))
fxV = npz['fxV_converged']
buckets[method_key] = np.zeros(len(bins)-1)
if cap is not None and fxV.shape[1] > cap:
rfx.hardwrite(logfile,'capping...\n')
perm = np.random.permutation(fxV.shape[1])
fxV = fxV[:,perm[:cap]]
for j in range(fxV.shape[1]):
rfx.hardwrite(logfile,'disting %d of %d...\n'%(j,fxV.shape[1]))
dists = np.fabs(fxV-fxV[:,[j]]).max(axis=0)
dists[dists == 0] = 2.0**bins[0]
logdists = np.log2(dists)
logdists[logdists < bins[0]] = bins[0]
logdists[logdists > bins[-1]] = bins[-1]
hist,_ = np.histogram(logdists,bins=bins)
buckets[method_key] += hist
npz = {'bins':bins,'traverse_buckets':buckets['traverse'],'baseline_buckets':buckets['baseline']}
fe.save_npz_file('results/simple_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp), **npz)
rfx.hardwrite(logfile,'Done.\n')
logfile.close()
print('Done %s %d %d'%(test_data_id,N,samp))
def pool_get_simple_rd(args):
"""
Wrapper function passed to multiprocessing.Pool
"""
get_simple_rd(*args)
def run_simple_rd(test_data_id, Ns, num_procs):
"""
Run get_simple_rd on all networks in test_data_id whose size is in the list Ns.
Multiprocessing is used to run on multiple networks in parallel.
num_procs is the number of processors to use.
"""
cpu_count = mp.cpu_count()
print('%d cpus, using %d'%(cpu_count, num_procs))
pool_args = []
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for (N,S) in zip(network_sizes, num_samples):
if N not in Ns: continue
cap = 1000
for s in range(S):
logfilename = 'logs/simple_rd_%s_N_%d_s_%d.log'%(test_data_id,N,s)
pool_args.append((test_data_id,N,s,cap,logfilename))
start_time = time.time()
test_fun = pool_get_simple_rd
if num_procs < 1: # don't multiprocess
for args in pool_args: test_fun(args)
else:
pool = mp.Pool(processes=num_procs)
pool.map(test_fun, pool_args)
pool.close()
pool.join()
print('total time: %f'%(time.time()-start_time))
def show_traverse_rd_fig(test_data_ids, Ns, samp_range):
"""
Plot relative distances from points found by fiber traversal.
    test_data_ids, Ns, and samp_range should be as in show_traverse_re_fig.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/traverse_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
in_rr, out_rr = npz['in_RR'], npz['out_RR']
if (in_rr > 0).any(): in_rr[in_rr == 0] = in_rr[in_rr > 0].min()
else: in_rr[in_rr == 0] = 2**(-30)
ax = plt.subplot(len(samp_range),len(Ns),sp)
sp += 1
if out_rr.shape[0] > 0: plt.hist(np.log2(out_rr),bins=30,log=log,facecolor='k')
plt.hist(np.log2(in_rr),bins=10,log=log,facecolor='w')
if N == Ns[0]:
plt.ylabel('# of pairs')
if samp == samp_range[0]:
ax.set_title('N = %d'%N)
if samp == samp_range[-1]:
plt.xlabel('Fiber Relative Distance')
plt.xlim([-30,50])
plt.xticks(range(-30,51,10),['']+['$2^{%d}$'%xl for xl in range(-20,51,10)])
plt.show()
def show_baseline_rd_fig(test_data_ids, Ns, samp_range):
"""
Plot relative distances from points found by the baseline solver.
    test_data_ids, Ns, and samp_range should be as in show_baseline_re_fig.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/baseline_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
in_rr, out_rr = npz['in_RR'], npz['out_RR']
if (in_rr > 0).any(): in_rr[in_rr == 0] = in_rr[in_rr > 0].min()
else: in_rr[in_rr == 0] = 2**(-30)
ax = plt.subplot(len(samp_range),len(Ns),sp)
sp += 1
if np.isinf(out_rr).any():
if np.isinf(out_rr).all(): out_rr[:] = 4*in_rr.max()
else: out_rr[np.isinf(out_rr)] = 4*out_rr[~np.isinf(out_rr)].max()
print('out_rr:')
print(out_rr.shape)
print((out_rr==0).sum())
print(np.isinf(in_rr).sum())
print(np.isinf(out_rr).sum())
print(np.isnan(out_rr).sum())
if out_rr.shape[0] > 0: plt.hist(np.log2(out_rr),bins=30,log=log,facecolor='k')
# if out_rr.shape[0] > 0: plt.hist(out_rr,bins=30,facecolor='k')
plt.hist(np.log2(in_rr),bins=10,log=log,facecolor='w')
# plt.hist(in_rr,bins=10,facecolor='w')
if N == Ns[0]:
plt.ylabel('# of pairs')
if samp == samp_range[0]:
ax.set_title('N = %d'%N)
if samp == samp_range[-1]:
plt.xlabel('Baseline Relative Distance')
plt.xlim([-30,50])
plt.xticks(range(-30,51,10),['']+['$2^{%d}$'%xl for xl in range(-20,51,10)])
plt.show()
def show_simple_rd_all_fig(test_data_ids, Ns, samp_range):
"""
Plot relative distances from points found by fiber traversal or baseline.
    test_data_ids, Ns, and samp_range should be as in show_traverse_re_fig.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42
buckets = None
bins = None
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/simple_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
if buckets is None:
buckets = np.zeros(npz['traverse_buckets'].shape)
bins = npz['bins']
buckets += npz['traverse_buckets']
buckets += npz['baseline_buckets']
plt.figure(figsize=(8,2.4))
# plt.hist(buckets,bins=bins,log=log)
if log:
buckets[buckets > 0] = np.log2(buckets[buckets > 0])
plt.bar(left=bins[:-1],height=buckets,width=bins[1:]-bins[:-1],facecolor='none')
plt.ylabel('# of pairs')
plt.xlabel('$max_i|v_i^{(1)}-v_i^{(2)}|$') #'Max Coordinate-wise Distance')
xmin_idx = int(((bins[:-1] > -1000) & (buckets > 0)).argmax())
xstep = int(np.ceil((bins[-1]-bins[xmin_idx])/10))
plt.xticks(bins[xmin_idx::xstep],['$2^{%d}$'%xl for xl in bins[xmin_idx::xstep]])
plt.xlim([bins[xmin_idx]-xstep,bins[-1]+xstep])
if log:
ymax = np.ceil(buckets.max())+1
ystep = np.ceil(ymax/5)
plt.yticks(np.arange(0,ymax+ystep,ystep),['$2^{%d}$'%yl for yl in np.arange(0,ymax+ystep,ystep)])
plt.ylim([0,ymax+1])
plt.tight_layout()
plt.show()
|
run_simple_rd
|
Run get_simple_rd on all networks in test_data_id whose size is in the list Ns.
Multiprocessing is used to run on multiple networks in parallel.
num_procs is the number of processors to use.
|
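A minimal invocation sketch for run_simple_rd as described above; the test data id 'mini' and the network sizes are hypothetical placeholders, and num_procs=0 runs the jobs serially instead of using a pool:

    run_simple_rd('mini', Ns=[10, 20], num_procs=0)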
"""
Methods for assessing treatment of finite-precision issues
"""
import os
import sys
import time
import multiprocessing as mp
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.markers as mrk
import plotter as ptr
import rnn_fxpts as rfx
import fxpt_experiments as fe
import pickle as pkl
def get_relative_errors(test_data_id):
"""
Compute and save the relative errors of every point found on every network in a testing set.
Relative error is defined in (Katz and Reggia 2017).
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
"""
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for alg in ['traverse','baseline']:
for (N, S) in zip(network_sizes, num_samples):
for samp in range(S):
print('%s, alg %s, N %d,samp %d'%(test_data_id,alg,N,samp))
npz = np.load('results/%s_%s_N_%d_s_%d.npz'%(alg,test_data_id,N,samp))
W = npz['W']
fxV = npz['fxV']
fxV, converged = rfx.refine_fxpts_capped(W, fxV)
margin = rfx.estimate_forward_error(W, fxV)
f = np.tanh(W.dot(fxV))-fxV
re = np.fabs(f/margin)
re_fx, re_un = re[:,converged].max(axis=0), re[:,~converged].max(axis=0)
re_fx = re_fx[re_fx > 0]
f_fx, f_un = np.fabs(f[:,converged]).max(axis=0), np.fabs(f[:,~converged]).max(axis=0)
f_fx = f_fx[f_fx > 0]
re_npz = {}
re_npz['f_fx'] = f_fx
re_npz['f_un'] = f_un
re_npz['re_fx'] = re_fx
re_npz['re_un'] = re_un
fe.save_npz_file('results/%s_re_%s_N_%d_s_%d.npz'%(alg,test_data_id,N,samp), **re_npz)
def show_traverse_re_fig(test_data_ids, Ns, samp_range):
"""
Plot relative errors from points found by fiber traversal.
test_data_ids and Ns should be length-2 lists.
    Subplots in the first column will show errors for networks of size Ns[0] from test_data_ids[0].
Similarly the second column draws from Ns[1], test_data_ids[1].
Each network sample within samp_range is shown on a separate row.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/traverse_re_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
m_fx, m_un = npz['re_fx'], npz['re_un']
ax = plt.subplot(len(samp_range),len(Ns),sp)
sp += 1
if m_un.shape[0] > 0: plt.hist(np.log2(m_un),bins=30,log=log,facecolor='k')
plt.hist(np.log2(m_fx),bins=10,log=log,facecolor='w')
lo = 10*(int(np.log2(m_fx).min()/10)-1)
if m_un.shape[0] > 0: hi = 10*(int(np.log2(m_un).max()/10)+1)
else: hi = 0
plt.xticks(range(-10,1,2),['']+['$2^{%d}$'%yl for yl in range(-8,1,2)])
if N == Ns[0]:
plt.ylabel('# of points')
if samp == samp_range[0]:
ax.set_title('N = %d'%N)
if samp == samp_range[-1]:
plt.xlabel('Fiber Relative Error')
plt.show()
def baseline_re_single_analysis(test_data_id, N, samp, cap=10):
"""
Analyze edge cases of relative errors on a single network
Uses the samp^{th} sample network of size N in test data test_data_id.
Relative errors in the range (0, 2^{cap}) are considered edge cases.
Returns the number of edge cases divided by the difference |T-B| - |B-T| as a percent.
T and B are as defined in (Katz and Reggia 2017).
"""
npz = fe.load_npz_file('results/baseline_re_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
res = fe.load_pkl_file('results/TvB_%s_N_%d_s_%d.pkl'%(test_data_id, N, samp))
re_un = npz['re_un']
percent = 100.*(re_un < 2**cap).sum()/np.array(res['T-B']-res['B-T'])
print('N=%d, samp %d: B-T = %d, T-B = %d, %d (%f%%) possibly unique slow RE(B) < 2**%d'%(N, samp, res['B-T'], res['T-B'],(re_un < 2**cap).sum(), percent, cap))
return percent
def baseline_re_batch_analysis(test_data_id, Ns, cap=10):
"""
    Runs baseline_re_single_analysis on all networks in test_data_id whose size is in the list Ns.
cap is as in baseline_re_single_analysis.
returns numpy.array percents, where
percents[i] is as in baseline_re_single_analysis for the i^{th} sample network.
"""
percents = []
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for (N, S) in zip(network_sizes, num_samples):
if N not in Ns: continue
for samp in range(S):
percents.append(baseline_re_single_analysis(test_data_id,N,samp,cap=cap))
percents = np.array(percents)
    print('mean %%: %f%%'%percents.mean())
    return percents
def show_baseline_re_fig(test_data_ids, Ns, samp_range):
"""
Plot relative errors from points found by the baseline solver.
test_data_ids and Ns should be length-2 lists.
    Subplots in the first column will show errors for networks of size Ns[0] from test_data_ids[0].
Similarly the second column draws from Ns[1], test_data_ids[1].
Each network sample within samp_range is shown on a separate row.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/baseline_re_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
m_fx, m_un = npz['re_fx'], npz['re_un']
ax = plt.subplot(len(samp_range),len(Ns),sp)
sp += 1
if m_un.shape[0] > 0: plt.hist(np.log2(m_un),bins=30,log=log,facecolor='k')
plt.hist(np.log2(m_fx),bins=10,log=log,facecolor='w')
lo, hi = -20,50
plt.xticks(range(lo,hi+1,10),[''] + ['$2^{%d}$'%yl for yl in range(lo+10,hi+1,10)])
if N == Ns[0]:
plt.ylabel('# of points')
if samp == samp_range[0]:
ax.set_title('N = %d'%N)
if samp == samp_range[-1]:
plt.xlabel('Baseline Relative Error')
baseline_re_single_analysis(test_data_id, N, samp)
plt.show()
def get_baseline_rd(test_data_id,N,samp,cap,logfilename=os.devnull):
"""
Compute and save relative distances between pairs of points found by the baseline solver.
Relative distance is defined in (Katz and Reggia 2017).
Computes for the samp^{th} sample network of size N in test_data_id.
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
Only pairs within a random subset of points of size cap are inspected.
logfilename is a file name at which progress updates are written.
"""
logfile = open(logfilename,'w')
logfile.write('Running baseline rd (%s,%d,%d)...\n'%(test_data_id,N,samp))
npz = fe.load_npz_file('results/baseline_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
fxV = npz['fxV_converged']
fxV_unique = npz['fxV_unique']
W = npz['W']
if cap is not None and fxV.shape[1] > cap:
logfile.write('capping...\n')
perm = np.random.permutation(fxV.shape[1])
fxV = fxV[:,perm[:cap]]
in_RR, out_RR = [],[]
for j in range(fxV_unique.shape[1]):
logfile.write('duping %d of %d...\n'%(j,fxV_unique.shape[1]))
dups, RR, R = rfx.identical_fixed_points(W, fxV, fxV_unique[:,[j]])
in_RR.append(RR[dups])
out_RR.append(RR[~dups])
in_RR, out_RR = np.concatenate(in_RR), np.concatenate(out_RR)
npz["in_RR"], npz["out_RR"] = in_RR, out_RR
fe.save_npz_file('results/baseline_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp), **npz)
logfile.write('Done.\n')
logfile.close()
print('Done %s %d %d'%(test_data_id,N,samp))
def pool_get_baseline_rd(args):
"""
Wrapper function passed to multiprocessing.Pool
"""
get_baseline_rd(*args)
def run_baseline_rd(test_data_id, Ns, num_procs):
"""
Run get_baseline_rd on all networks in test_data_id whose size is in the list Ns.
Multiprocessing is used to run on multiple networks in parallel.
num_procs is the number of processors to use.
"""
cpu_count = mp.cpu_count()
print('%d cpus, using %d'%(cpu_count, num_procs))
pool_args = []
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for (N, S) in zip(network_sizes, num_samples):
if N not in Ns: continue
cap = 20000
for s in range(S):
logfilename = 'logs/baseline_rd_%s_N_%d_s_%d.log'%(test_data_id,N,s)
pool_args.append((test_data_id,N,s,cap,logfilename))
start_time = time.time()
test_fun = pool_get_baseline_rd
if num_procs < 1: # don't multiprocess
for args in pool_args: test_fun(args)
else:
pool = mp.Pool(processes=num_procs)
pool.map(test_fun, pool_args)
pool.close()
pool.join()
print('total time: %f'%(time.time()-start_time))
def get_traverse_rd(test_data_id,N,samp,cap,logfilename=os.devnull):
"""
    Compute and save relative distances between pairs of points found by the fiber traversal solver.
Relative distance is defined in (Katz and Reggia 2017).
Computes for the samp^{th} sample network of size N in test_data_id.
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
Only pairs within a random subset of points of size cap are inspected.
logfilename is a file name at which progress updates are written.
"""
logfile = open(logfilename,'w')
logfile.write('Running traverse rd (%s,%d,%d)...\n'%(test_data_id,N,samp))
npz = fe.load_npz_file('results/traverse_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
fxV = npz['fxV_converged']
fxV_unique = npz['fxV_unique']
W = npz['W']
if cap is not None and fxV.shape[1] > cap:
logfile.write('capping...\n')
perm = np.random.permutation(fxV.shape[1])
fxV = fxV[:,perm[:cap]]
in_RR, out_RR = [],[]
for j in range(fxV_unique.shape[1]):
logfile.write('duping %d of %d...\n'%(j,fxV_unique.shape[1]))
dups, RR, R = rfx.identical_fixed_points(W, fxV, fxV_unique[:,[j]])
in_RR.append(RR[dups])
out_RR.append(RR[~dups])
in_RR, out_RR = np.concatenate(in_RR), np.concatenate(out_RR)
npz["in_RR"], npz["out_RR"] = in_RR, out_RR
fe.save_npz_file('results/traverse_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp), **npz)
logfile.write('Done.\n')
logfile.close()
print('Done %s %d %d'%(test_data_id,N,samp))
def pool_get_traverse_rd(args):
"""
Wrapper function passed to multiprocessing.Pool
"""
get_traverse_rd(*args)
def run_traverse_rd(test_data_id, Ns, num_procs):
"""
Run get_traverse_rd on all networks in test_data_id whose size is in the list Ns.
Multiprocessing is used to run on multiple networks in parallel.
num_procs is the number of processors to use.
"""
cpu_count = mp.cpu_count()
print('%d cpus, using %d'%(cpu_count, num_procs))
pool_args = []
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for (N,S) in zip(network_sizes, num_samples):
if N not in Ns: continue
cap = 20000
for s in range(S):
logfilename = 'logs/traverse_rd_%s_N_%d_s_%d.log'%(test_data_id,N,s)
pool_args.append((test_data_id,N,s,cap,logfilename))
start_time = time.time()
test_fun = pool_get_traverse_rd
if num_procs < 1: # don't multiprocess
for args in pool_args: test_fun(args)
else:
pool = mp.Pool(processes=num_procs)
pool.map(test_fun, pool_args)
pool.close()
pool.join()
print('total time: %f'%(time.time()-start_time))
def get_simple_rd(test_data_id,N,samp,cap,logfilename=os.devnull):
"""
    Uses a simple uniqueness test: two points are considered identical if their max absolute coordinate-wise difference is < 2**-32.
Compute and save distances between pairs of points found by both solvers.
Computes for the samp^{th} sample network of size N in test_data_id.
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
Only pairs within a random subset of points of size cap are inspected.
Saves pair-wise distance distribution in histogram with one bucket per integer power of 2
logfilename is a file name at which progress updates are written.
"""
logfile = open(logfilename,'w')
rfx.hardwrite(logfile,'Running simple rd (%s,%d,%d)...\n'%(test_data_id,N,samp))
buckets = {}
bins = np.arange(-1025,3)
for method_key in ['traverse','baseline']:
npz = fe.load_npz_file('results/%s_%s_N_%d_s_%d.npz'%(method_key,test_data_id,N,samp))
fxV = npz['fxV_converged']
buckets[method_key] = np.zeros(len(bins)-1)
if cap is not None and fxV.shape[1] > cap:
rfx.hardwrite(logfile,'capping...\n')
perm = np.random.permutation(fxV.shape[1])
fxV = fxV[:,perm[:cap]]
for j in range(fxV.shape[1]):
rfx.hardwrite(logfile,'disting %d of %d...\n'%(j,fxV.shape[1]))
dists = np.fabs(fxV-fxV[:,[j]]).max(axis=0)
dists[dists == 0] = 2.0**bins[0]
logdists = np.log2(dists)
logdists[logdists < bins[0]] = bins[0]
logdists[logdists > bins[-1]] = bins[-1]
hist,_ = np.histogram(logdists,bins=bins)
buckets[method_key] += hist
npz = {'bins':bins,'traverse_buckets':buckets['traverse'],'baseline_buckets':buckets['baseline']}
fe.save_npz_file('results/simple_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp), **npz)
rfx.hardwrite(logfile,'Done.\n')
logfile.close()
print('Done %s %d %d'%(test_data_id,N,samp))
def pool_get_simple_rd(args):
"""
Wrapper function passed to multiprocessing.Pool
"""
get_simple_rd(*args)
# MASKED: run_simple_rd function (lines 316-343)
def show_traverse_rd_fig(test_data_ids, Ns, samp_range):
"""
Plot relative distances from points found by fiber traversal.
    test_data_ids, Ns, and samp_range should be as in show_traverse_re_fig.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/traverse_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
in_rr, out_rr = npz['in_RR'], npz['out_RR']
if (in_rr > 0).any(): in_rr[in_rr == 0] = in_rr[in_rr > 0].min()
else: in_rr[in_rr == 0] = 2**(-30)
ax = plt.subplot(len(samp_range),len(Ns),sp)
sp += 1
if out_rr.shape[0] > 0: plt.hist(np.log2(out_rr),bins=30,log=log,facecolor='k')
plt.hist(np.log2(in_rr),bins=10,log=log,facecolor='w')
if N == Ns[0]:
plt.ylabel('# of pairs')
if samp == samp_range[0]:
ax.set_title('N = %d'%N)
if samp == samp_range[-1]:
plt.xlabel('Fiber Relative Distance')
plt.xlim([-30,50])
plt.xticks(range(-30,51,10),['']+['$2^{%d}$'%xl for xl in range(-20,51,10)])
plt.show()
def show_baseline_rd_fig(test_data_ids, Ns, samp_range):
"""
Plot relative distances from points found by the baseline solver.
    test_data_ids, Ns, and samp_range should be as in show_baseline_re_fig.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/baseline_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
in_rr, out_rr = npz['in_RR'], npz['out_RR']
if (in_rr > 0).any(): in_rr[in_rr == 0] = in_rr[in_rr > 0].min()
else: in_rr[in_rr == 0] = 2**(-30)
ax = plt.subplot(len(samp_range),len(Ns),sp)
sp += 1
if np.isinf(out_rr).any():
if np.isinf(out_rr).all(): out_rr[:] = 4*in_rr.max()
else: out_rr[np.isinf(out_rr)] = 4*out_rr[~np.isinf(out_rr)].max()
print('out_rr:')
print(out_rr.shape)
print((out_rr==0).sum())
print(np.isinf(in_rr).sum())
print(np.isinf(out_rr).sum())
print(np.isnan(out_rr).sum())
if out_rr.shape[0] > 0: plt.hist(np.log2(out_rr),bins=30,log=log,facecolor='k')
# if out_rr.shape[0] > 0: plt.hist(out_rr,bins=30,facecolor='k')
plt.hist(np.log2(in_rr),bins=10,log=log,facecolor='w')
# plt.hist(in_rr,bins=10,facecolor='w')
if N == Ns[0]:
plt.ylabel('# of pairs')
if samp == samp_range[0]:
ax.set_title('N = %d'%N)
if samp == samp_range[-1]:
plt.xlabel('Baseline Relative Distance')
plt.xlim([-30,50])
plt.xticks(range(-30,51,10),['']+['$2^{%d}$'%xl for xl in range(-20,51,10)])
plt.show()
def show_simple_rd_all_fig(test_data_ids, Ns, samp_range):
"""
Plot relative distances from points found by fiber traversal or baseline.
    test_data_ids, Ns, and samp_range should be as in show_traverse_re_fig.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42
buckets = None
bins = None
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/simple_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
if buckets is None:
buckets = np.zeros(npz['traverse_buckets'].shape)
bins = npz['bins']
buckets += npz['traverse_buckets']
buckets += npz['baseline_buckets']
plt.figure(figsize=(8,2.4))
# plt.hist(buckets,bins=bins,log=log)
if log:
buckets[buckets > 0] = np.log2(buckets[buckets > 0])
plt.bar(left=bins[:-1],height=buckets,width=bins[1:]-bins[:-1],facecolor='none')
plt.ylabel('# of pairs')
plt.xlabel('$max_i|v_i^{(1)}-v_i^{(2)}|$') #'Max Coordinate-wise Distance')
xmin_idx = int(((bins[:-1] > -1000) & (buckets > 0)).argmax())
xstep = int(np.ceil((bins[-1]-bins[xmin_idx])/10))
plt.xticks(bins[xmin_idx::xstep],['$2^{%d}$'%xl for xl in bins[xmin_idx::xstep]])
plt.xlim([bins[xmin_idx]-xstep,bins[-1]+xstep])
if log:
ymax = np.ceil(buckets.max())+1
ystep = np.ceil(ymax/5)
plt.yticks(np.arange(0,ymax+ystep,ystep),['$2^{%d}$'%yl for yl in np.arange(0,ymax+ystep,ystep)])
plt.ylim([0,ymax+1])
plt.tight_layout()
plt.show()
|
def run_simple_rd(test_data_id, Ns, num_procs):
"""
Run get_simple_rd on all networks in test_data_id whose size is in the list Ns.
Multiprocessing is used to run on multiple networks in parallel.
num_procs is the number of processors to use.
"""
cpu_count = mp.cpu_count()
print('%d cpus, using %d'%(cpu_count, num_procs))
pool_args = []
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for (N,S) in zip(network_sizes, num_samples):
if N not in Ns: continue
cap = 1000
for s in range(S):
logfilename = 'logs/simple_rd_%s_N_%d_s_%d.log'%(test_data_id,N,s)
pool_args.append((test_data_id,N,s,cap,logfilename))
start_time = time.time()
test_fun = pool_get_simple_rd
if num_procs < 1: # don't multiprocess
for args in pool_args: test_fun(args)
else:
pool = mp.Pool(processes=num_procs)
pool.map(test_fun, pool_args)
pool.close()
pool.join()
print('total time: %f'%(time.time()-start_time))
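# A conservative choice of num_procs for the driver above (an illustrative
# heuristic, not taken from the original experiments) is to leave one core free:
#     run_simple_rd('mini', Ns=[10, 20], num_procs=max(1, mp.cpu_count() - 1))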
| 316 | 343 |
"""
Methods for assessing treatment of finite-precision issues
"""
import os
import sys
import time
import multiprocessing as mp
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.markers as mrk
import plotter as ptr
import rnn_fxpts as rfx
import fxpt_experiments as fe
import pickle as pkl
def get_relative_errors(test_data_id):
"""
Compute and save the relative errors of every point found on every network in a testing set.
Relative error is defined in (Katz and Reggia 2017).
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
"""
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for alg in ['traverse','baseline']:
for (N, S) in zip(network_sizes, num_samples):
for samp in range(S):
print('%s, alg %s, N %d,samp %d'%(test_data_id,alg,N,samp))
npz = np.load('results/%s_%s_N_%d_s_%d.npz'%(alg,test_data_id,N,samp))
W = npz['W']
fxV = npz['fxV']
fxV, converged = rfx.refine_fxpts_capped(W, fxV)
margin = rfx.estimate_forward_error(W, fxV)
f = np.tanh(W.dot(fxV))-fxV
re = np.fabs(f/margin)
re_fx, re_un = re[:,converged].max(axis=0), re[:,~converged].max(axis=0)
re_fx = re_fx[re_fx > 0]
f_fx, f_un = np.fabs(f[:,converged]).max(axis=0), np.fabs(f[:,~converged]).max(axis=0)
f_fx = f_fx[f_fx > 0]
re_npz = {}
re_npz['f_fx'] = f_fx
re_npz['f_un'] = f_un
re_npz['re_fx'] = re_fx
re_npz['re_un'] = re_un
fe.save_npz_file('results/%s_re_%s_N_%d_s_%d.npz'%(alg,test_data_id,N,samp), **re_npz)
def show_traverse_re_fig(test_data_ids, Ns, samp_range):
"""
Plot relative errors from points found by fiber traversal.
test_data_ids and Ns should be length-2 lists.
    Subplots in the first column will show errors for networks of size Ns[0] from test_data_ids[0].
Similarly the second column draws from Ns[1], test_data_ids[1].
Each network sample within samp_range is shown on a separate row.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/traverse_re_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
m_fx, m_un = npz['re_fx'], npz['re_un']
ax = plt.subplot(len(samp_range),len(Ns),sp)
sp += 1
if m_un.shape[0] > 0: plt.hist(np.log2(m_un),bins=30,log=log,facecolor='k')
plt.hist(np.log2(m_fx),bins=10,log=log,facecolor='w')
lo = 10*(int(np.log2(m_fx).min()/10)-1)
if m_un.shape[0] > 0: hi = 10*(int(np.log2(m_un).max()/10)+1)
else: hi = 0
plt.xticks(range(-10,1,2),['']+['$2^{%d}$'%yl for yl in range(-8,1,2)])
if N == Ns[0]:
plt.ylabel('# of points')
if samp == samp_range[0]:
ax.set_title('N = %d'%N)
if samp == samp_range[-1]:
plt.xlabel('Fiber Relative Error')
plt.show()
def baseline_re_single_analysis(test_data_id, N, samp, cap=10):
"""
Analyze edge cases of relative errors on a single network
Uses the samp^{th} sample network of size N in test data test_data_id.
Relative errors in the range (0, 2^{cap}) are considered edge cases.
Returns the number of edge cases divided by the difference |T-B| - |B-T| as a percent.
T and B are as defined in (Katz and Reggia 2017).
"""
npz = fe.load_npz_file('results/baseline_re_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
res = fe.load_pkl_file('results/TvB_%s_N_%d_s_%d.pkl'%(test_data_id, N, samp))
re_un = npz['re_un']
percent = 100.*(re_un < 2**cap).sum()/np.array(res['T-B']-res['B-T'])
print('N=%d, samp %d: B-T = %d, T-B = %d, %d (%f%%) possibly unique slow RE(B) < 2**%d'%(N, samp, res['B-T'], res['T-B'],(re_un < 2**cap).sum(), percent, cap))
return percent
def baseline_re_batch_analysis(test_data_id, Ns, cap=10):
"""
    Runs baseline_re_single_analysis on all networks in test_data_id whose size is in the list Ns.
cap is as in baseline_re_single_analysis.
returns numpy.array percents, where
percents[i] is as in baseline_re_single_analysis for the i^{th} sample network.
"""
percents = []
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for (N, S) in zip(network_sizes, num_samples):
if N not in Ns: continue
for samp in range(S):
percents.append(baseline_re_single_analysis(test_data_id,N,samp,cap=cap))
percents = np.array(percents)
    print('mean %%: %f%%'%percents.mean())
    return percents
def show_baseline_re_fig(test_data_ids, Ns, samp_range):
"""
Plot relative errors from points found by the baseline solver.
test_data_ids and Ns should be length-2 lists.
    Subplots in the first column will show errors for networks of size Ns[0] from test_data_ids[0].
Similarly the second column draws from Ns[1], test_data_ids[1].
Each network sample within samp_range is shown on a separate row.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/baseline_re_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
m_fx, m_un = npz['re_fx'], npz['re_un']
ax = plt.subplot(len(samp_range),len(Ns),sp)
sp += 1
if m_un.shape[0] > 0: plt.hist(np.log2(m_un),bins=30,log=log,facecolor='k')
plt.hist(np.log2(m_fx),bins=10,log=log,facecolor='w')
lo, hi = -20,50
plt.xticks(range(lo,hi+1,10),[''] + ['$2^{%d}$'%yl for yl in range(lo+10,hi+1,10)])
if N == Ns[0]:
plt.ylabel('# of points')
if samp == samp_range[0]:
ax.set_title('N = %d'%N)
if samp == samp_range[-1]:
plt.xlabel('Baseline Relative Error')
baseline_re_single_analysis(test_data_id, N, samp)
plt.show()
def get_baseline_rd(test_data_id,N,samp,cap,logfilename=os.devnull):
"""
Compute and save relative distances between pairs of points found by the baseline solver.
Relative distance is defined in (Katz and Reggia 2017).
Computes for the samp^{th} sample network of size N in test_data_id.
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
Only pairs within a random subset of points of size cap are inspected.
logfilename is a file name at which progress updates are written.
"""
logfile = open(logfilename,'w')
logfile.write('Running baseline rd (%s,%d,%d)...\n'%(test_data_id,N,samp))
npz = fe.load_npz_file('results/baseline_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
fxV = npz['fxV_converged']
fxV_unique = npz['fxV_unique']
W = npz['W']
if cap is not None and fxV.shape[1] > cap:
logfile.write('capping...\n')
perm = np.random.permutation(fxV.shape[1])
fxV = fxV[:,perm[:cap]]
in_RR, out_RR = [],[]
for j in range(fxV_unique.shape[1]):
logfile.write('duping %d of %d...\n'%(j,fxV_unique.shape[1]))
dups, RR, R = rfx.identical_fixed_points(W, fxV, fxV_unique[:,[j]])
in_RR.append(RR[dups])
out_RR.append(RR[~dups])
in_RR, out_RR = np.concatenate(in_RR), np.concatenate(out_RR)
npz["in_RR"], npz["out_RR"] = in_RR, out_RR
fe.save_npz_file('results/baseline_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp), **npz)
logfile.write('Done.\n')
logfile.close()
print('Done %s %d %d'%(test_data_id,N,samp))
def pool_get_baseline_rd(args):
"""
Wrapper function passed to multiprocessing.Pool
"""
get_baseline_rd(*args)
def run_baseline_rd(test_data_id, Ns, num_procs):
"""
Run get_baseline_rd on all networks in test_data_id whose size is in the list Ns.
Multiprocessing is used to run on multiple networks in parallel.
num_procs is the number of processors to use.
"""
cpu_count = mp.cpu_count()
print('%d cpus, using %d'%(cpu_count, num_procs))
pool_args = []
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for (N, S) in zip(network_sizes, num_samples):
if N not in Ns: continue
cap = 20000
for s in range(S):
logfilename = 'logs/baseline_rd_%s_N_%d_s_%d.log'%(test_data_id,N,s)
pool_args.append((test_data_id,N,s,cap,logfilename))
start_time = time.time()
test_fun = pool_get_baseline_rd
if num_procs < 1: # don't multiprocess
for args in pool_args: test_fun(args)
else:
pool = mp.Pool(processes=num_procs)
pool.map(test_fun, pool_args)
pool.close()
pool.join()
print('total time: %f'%(time.time()-start_time))
def get_traverse_rd(test_data_id,N,samp,cap,logfilename=os.devnull):
"""
Compute and save relative distances between pairs of points found by fiber traversal.
Relative distance is defined in (Katz and Reggia 2017).
Computes for the samp^{th} sample network of size N in test_data_id.
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
Only pairs within a random subset of points of size cap are inspected.
logfilename is a file name at which progress updates are written.
"""
logfile = open(logfilename,'w')
logfile.write('Running traverse rd (%s,%d,%d)...\n'%(test_data_id,N,samp))
npz = fe.load_npz_file('results/traverse_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
fxV = npz['fxV_converged']
fxV_unique = npz['fxV_unique']
W = npz['W']
if cap is not None and fxV.shape[1] > cap:
logfile.write('capping...\n')
perm = np.random.permutation(fxV.shape[1])
fxV = fxV[:,perm[:cap]]
in_RR, out_RR = [],[]
for j in range(fxV_unique.shape[1]):
logfile.write('duping %d of %d...\n'%(j,fxV_unique.shape[1]))
dups, RR, R = rfx.identical_fixed_points(W, fxV, fxV_unique[:,[j]])
in_RR.append(RR[dups])
out_RR.append(RR[~dups])
in_RR, out_RR = np.concatenate(in_RR), np.concatenate(out_RR)
npz["in_RR"], npz["out_RR"] = in_RR, out_RR
fe.save_npz_file('results/traverse_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp), **npz)
logfile.write('Done.\n')
logfile.close()
print('Done %s %d %d'%(test_data_id,N,samp))
def pool_get_traverse_rd(args):
"""
Wrapper function passed to multiprocessing.Pool
"""
get_traverse_rd(*args)
def run_traverse_rd(test_data_id, Ns, num_procs):
"""
Run get_traverse_rd on all networks in test_data_id whose size is in the list Ns.
Multiprocessing is used to run on multiple networks in parallel.
num_procs is the number of processors to use.
"""
cpu_count = mp.cpu_count()
print('%d cpus, using %d'%(cpu_count, num_procs))
pool_args = []
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for (N,S) in zip(network_sizes, num_samples):
if N not in Ns: continue
cap = 20000
for s in range(S):
logfilename = 'logs/traverse_rd_%s_N_%d_s_%d.log'%(test_data_id,N,s)
pool_args.append((test_data_id,N,s,cap,logfilename))
start_time = time.time()
test_fun = pool_get_traverse_rd
if num_procs < 1: # don't multiprocess
for args in pool_args: test_fun(args)
else:
pool = mp.Pool(processes=num_procs)
pool.map(test_fun, pool_args)
pool.close()
pool.join()
print('total time: %f'%(time.time()-start_time))
def get_simple_rd(test_data_id,N,samp,cap,logfilename=os.devnull):
"""
Uses a simple uniqueness test: max absolute coordinate-wise difference < 2**-32.
Compute and save distances between pairs of points found by both solvers.
Computes for the samp^{th} sample network of size N in test_data_id.
test_data_id should be as in fxpt_experiments.generate_test_data (without file extension).
Only pairs within a random subset of points of size cap are inspected.
Saves pair-wise distance distribution in histogram with one bucket per integer power of 2
logfilename is a file name at which progress updates are written.
"""
logfile = open(logfilename,'w')
rfx.hardwrite(logfile,'Running simple rd (%s,%d,%d)...\n'%(test_data_id,N,samp))
buckets = {}
bins = np.arange(-1025,3)
for method_key in ['traverse','baseline']:
npz = fe.load_npz_file('results/%s_%s_N_%d_s_%d.npz'%(method_key,test_data_id,N,samp))
fxV = npz['fxV_converged']
buckets[method_key] = np.zeros(len(bins)-1)
if cap is not None and fxV.shape[1] > cap:
rfx.hardwrite(logfile,'capping...\n')
perm = np.random.permutation(fxV.shape[1])
fxV = fxV[:,perm[:cap]]
for j in range(fxV.shape[1]):
rfx.hardwrite(logfile,'disting %d of %d...\n'%(j,fxV.shape[1]))
dists = np.fabs(fxV-fxV[:,[j]]).max(axis=0)
dists[dists == 0] = 2.0**bins[0]
logdists = np.log2(dists)
logdists[logdists < bins[0]] = bins[0]
logdists[logdists > bins[-1]] = bins[-1]
hist,_ = np.histogram(logdists,bins=bins)
buckets[method_key] += hist
npz = {'bins':bins,'traverse_buckets':buckets['traverse'],'baseline_buckets':buckets['baseline']}
fe.save_npz_file('results/simple_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp), **npz)
rfx.hardwrite(logfile,'Done.\n')
logfile.close()
print('Done %s %d %d'%(test_data_id,N,samp))
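# A minimal standalone sketch (not part of the experiment pipeline) of the
# power-of-two bucketing performed by get_simple_rd above, applied to a tiny
# made-up point set; the bin edges and the zero/clipping handling mirror the
# loop body above.
def _demo_log2_bucketing():
    import numpy as np
    bins = np.arange(-1025, 3)  # one bucket per integer power of 2
    fxV = np.array([[0.0, 1.0, 1.0 + 2.0**-20],  # three points stored column-wise
                    [0.0, 0.5, 0.5]])
    buckets = np.zeros(len(bins) - 1)
    for j in range(fxV.shape[1]):
        # max absolute coordinate-wise distance from every point to point j
        dists = np.fabs(fxV - fxV[:, [j]]).max(axis=0)
        dists[dists == 0] = 2.0**bins[0]  # self-distances land in the lowest bucket
        logdists = np.clip(np.log2(dists), bins[0], bins[-1])
        hist, _ = np.histogram(logdists, bins=bins)
        buckets += hist
    return bins, buckets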
def pool_get_simple_rd(args):
"""
Wrapper function passed to multiprocessing.Pool
"""
get_simple_rd(*args)
def run_simple_rd(test_data_id, Ns, num_procs):
"""
Run get_simple_rd on all networks in test_data_id whose size is in the list Ns.
Multiprocessing is used to run on multiple networks in parallel.
num_procs is the number of processors to use.
"""
cpu_count = mp.cpu_count()
print('%d cpus, using %d'%(cpu_count, num_procs))
pool_args = []
network_sizes, num_samples, _ = fe.load_test_data('%s.npz'%test_data_id)
for (N,S) in zip(network_sizes, num_samples):
if N not in Ns: continue
cap = 1000
for s in range(S):
logfilename = 'logs/simple_rd_%s_N_%d_s_%d.log'%(test_data_id,N,s)
pool_args.append((test_data_id,N,s,cap,logfilename))
start_time = time.time()
test_fun = pool_get_simple_rd
if num_procs < 1: # don't multiprocess
for args in pool_args: test_fun(args)
else:
pool = mp.Pool(processes=num_procs)
pool.map(test_fun, pool_args)
pool.close()
pool.join()
print('total time: %f'%(time.time()-start_time))
def show_traverse_rd_fig(test_data_ids, Ns, samp_range):
"""
Plot relative distances from points found by fiber traversal.
test_data_ids, Ns, and samp_range should be as in show_traverse_re_fig.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/traverse_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
in_rr, out_rr = npz['in_RR'], npz['out_RR']
if (in_rr > 0).any(): in_rr[in_rr == 0] = in_rr[in_rr > 0].min()
else: in_rr[in_rr == 0] = 2**(-30)
ax = plt.subplot(len(samp_range),len(Ns),sp)
sp += 1
if out_rr.shape[0] > 0: plt.hist(np.log2(out_rr),bins=30,log=log,facecolor='k')
plt.hist(np.log2(in_rr),bins=10,log=log,facecolor='w')
if N == Ns[0]:
plt.ylabel('# of pairs')
if samp == samp_range[0]:
ax.set_title('N = %d'%N)
if samp == samp_range[-1]:
plt.xlabel('Fiber Relative Distance')
plt.xlim([-30,50])
plt.xticks(range(-30,51,10),['']+['$2^{%d}$'%xl for xl in range(-20,51,10)])
plt.show()
def show_baseline_rd_fig(test_data_ids, Ns, samp_range):
"""
Plot relative distances from points found by the baseline solver.
test_data_ids, Ns, and samp_range should be as in show_baseline_re_fig.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
sp = 1
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/baseline_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
in_rr, out_rr = npz['in_RR'], npz['out_RR']
if (in_rr > 0).any(): in_rr[in_rr == 0] = in_rr[in_rr > 0].min()
else: in_rr[in_rr == 0] = 2**(-30)
ax = plt.subplot(len(samp_range),len(Ns),sp)
sp += 1
if np.isinf(out_rr).any():
if np.isinf(out_rr).all(): out_rr[:] = 4*in_rr.max()
else: out_rr[np.isinf(out_rr)] = 4*out_rr[~np.isinf(out_rr)].max()
print('out_rr:')
print(out_rr.shape)
print((out_rr==0).sum())
print(np.isinf(in_rr).sum())
print(np.isinf(out_rr).sum())
print(np.isnan(out_rr).sum())
if out_rr.shape[0] > 0: plt.hist(np.log2(out_rr),bins=30,log=log,facecolor='k')
# if out_rr.shape[0] > 0: plt.hist(out_rr,bins=30,facecolor='k')
plt.hist(np.log2(in_rr),bins=10,log=log,facecolor='w')
# plt.hist(in_rr,bins=10,facecolor='w')
if N == Ns[0]:
plt.ylabel('# of pairs')
if samp == samp_range[0]:
ax.set_title('N = %d'%N)
if samp == samp_range[-1]:
plt.xlabel('Baseline Relative Distance')
plt.xlim([-30,50])
plt.xticks(range(-30,51,10),['']+['$2^{%d}$'%xl for xl in range(-20,51,10)])
plt.show()
def show_simple_rd_all_fig(test_data_ids, Ns, samp_range):
"""
Plot pair-wise distances between points found by fiber traversal or the baseline solver.
test_data_ids, Ns, and samp_range should be as in show_traverse_re_fig.
"""
log = True
mpl.rcParams['mathtext.default'] = 'regular'
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42
buckets = None
bins = None
for samp in samp_range:
for (test_data_id,N) in zip(test_data_ids, Ns):
print('samp %d, N %d'%(samp,N))
npz = np.load('results/simple_rd_%s_N_%d_s_%d.npz'%(test_data_id,N,samp))
if buckets is None:
buckets = np.zeros(npz['traverse_buckets'].shape)
bins = npz['bins']
buckets += npz['traverse_buckets']
buckets += npz['baseline_buckets']
plt.figure(figsize=(8,2.4))
# plt.hist(buckets,bins=bins,log=log)
if log:
buckets[buckets > 0] = np.log2(buckets[buckets > 0])
plt.bar(left=bins[:-1],height=buckets,width=bins[1:]-bins[:-1],facecolor='none')
plt.ylabel('# of pairs')
plt.xlabel('$max_i|v_i^{(1)}-v_i^{(2)}|$')  # i.e. max coordinate-wise distance
xmin_idx = int(((bins[:-1] > -1000) & (buckets > 0)).argmax())
xstep = int(np.ceil((bins[-1]-bins[xmin_idx])/10))
plt.xticks(bins[xmin_idx::xstep],['$2^{%d}$'%xl for xl in bins[xmin_idx::xstep]])
plt.xlim([bins[xmin_idx]-xstep,bins[-1]+xstep])
if log:
ymax = np.ceil(buckets.max())+1
ystep = np.ceil(ymax/5)
plt.yticks(np.arange(0,ymax+ystep,ystep),['$2^{%d}$'%yl for yl in np.arange(0,ymax+ystep,ystep)])
plt.ylim([0,ymax+1])
plt.tight_layout()
plt.show()
|
get_interaction_table
|
Get interaction_table that is used for fetching user-item interaction label in LS regularization.
Args:
user_id(torch.Tensor): the user id in user-item interactions, shape: [n_interactions, 1]
item_id(torch.Tensor): the item id in user-item interactions, shape: [n_interactions, 1]
y(torch.Tensor): the label in user-item interactions, shape: [n_interactions, 1]
Returns:
tuple:
- interaction_table(dict): key: user_id * 10^offset + item_id; value: y_{user_id, item_id}
- offset(int): The offset that is used for calculating the key(index) in interaction_table
|
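A minimal sketch of the key encoding this docstring describes, using made-up id values (n_entities, the ids, and the labels below are hypothetical, not taken from any RecBole dataset): the offset is 10 raised to the number of decimal digits of n_entities, and each (user, item) pair is packed into the single integer key user_id * offset + item_id, under which its label is stored.
import torch

n_entities = 4321                       # hypothetical entity count
offset = 10 ** len(str(n_entities))     # 10**4, as in the get_interaction_table implementation below
user_id = torch.tensor([7, 7, 9])
item_id = torch.tensor([55, 56, 55])
y = torch.ones(3)                       # positive interactions are labeled 1
keys = (user_id * offset + item_id).int().cpu().numpy().tolist()
interaction_table = dict(zip(keys, y.float().cpu().numpy().tolist()))
assert interaction_table[7 * offset + 55] == 1.0   # label of (user 7, item 55)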
# -*- coding: utf-8 -*-
# @Time : 2020/10/3
# @Author : Changxin Tian
# @Email : [email protected]
r"""
KGNNLS
################################################
Reference:
Hongwei Wang et al. "Knowledge-aware Graph Neural Networks with Label Smoothness Regularization
for Recommender Systems." in KDD 2019.
Reference code:
https://github.com/hwwang55/KGNN-LS
"""
import torch
import torch.nn as nn
import numpy as np
import random
from recbole.utils import InputType
from recbole.model.abstract_recommender import KnowledgeRecommender
from recbole.model.loss import BPRLoss, EmbLoss
from recbole.model.init import xavier_normal_initialization
class KGNNLS(KnowledgeRecommender):
r"""KGNN-LS is a knowledge-based recommendation model.
KGNN-LS transforms the knowledge graph into a user-specific weighted graph and then applies a graph neural network to
compute personalized item embeddings. To provide a better inductive bias, KGNN-LS relies on the label smoothness
assumption, which posits that adjacent items in the knowledge graph are likely to have similar user relevance
labels/scores. Label smoothness provides regularization over the edge weights and is equivalent to a label
propagation scheme on a graph.
"""
input_type = InputType.PAIRWISE
def __init__(self, config, dataset):
super(KGNNLS, self).__init__(config, dataset)
# load parameters info
self.embedding_size = config['embedding_size']
self.neighbor_sample_size = config['neighbor_sample_size']
self.aggregator_class = config['aggregator'] # which aggregator to use
# number of iterations when computing entity representation
self.n_iter = config['n_iter']
self.reg_weight = config['reg_weight'] # weight of l2 regularization
# weight of label Smoothness regularization
self.ls_weight = config['ls_weight']
# define embedding
self.user_embedding = nn.Embedding(self.n_users, self.embedding_size)
self.entity_embedding = nn.Embedding(
self.n_entities, self.embedding_size)
self.relation_embedding = nn.Embedding(
self.n_relations + 1, self.embedding_size)
# sample neighbors and construct interaction table
kg_graph = dataset.kg_graph(form='coo', value_field='relation_id')
adj_entity, adj_relation = self.construct_adj(kg_graph)
self.adj_entity, self.adj_relation = adj_entity.to(
self.device), adj_relation.to(self.device)
inter_feat = dataset.dataset.inter_feat.values
pos_users = torch.from_numpy(inter_feat[:, 0])
pos_items = torch.from_numpy(inter_feat[:, 1])
pos_label = torch.ones(pos_items.shape)
pos_interaction_table, self.offset = self.get_interaction_table(
pos_users, pos_items, pos_label)
self.interaction_table = self.sample_neg_interaction(
pos_interaction_table, self.offset)
# define function
self.softmax = nn.Softmax(dim=-1)
self.linear_layers = torch.nn.ModuleList()
for i in range(self.n_iter):
self.linear_layers.append(nn.Linear(
self.embedding_size if not self.aggregator_class == 'concat' else self.embedding_size * 2,
self.embedding_size))
self.ReLU = nn.ReLU()
self.Tanh = nn.Tanh()
self.bce_loss = nn.BCEWithLogitsLoss()
self.l2_loss = EmbLoss()
# parameters initialization
self.apply(xavier_normal_initialization)
# MASKED: get_interaction_table function (lines 90-110)
def sample_neg_interaction(self, pos_interaction_table, offset):
r"""Sample neg_interaction to construct train data.
Args:
pos_interaction_table(dict): the interaction_table that only contains pos_interaction.
offset(int): The offset that is used for calculating the key(index) in interaction_table
Returns:
interaction_table(dict): key: user_id * 10^offset + item_id; value: y_{user_id, item_id}
"""
pos_num = len(pos_interaction_table)
neg_num = 0
neg_interaction_table = {}
while neg_num < pos_num:
user_id = random.randint(0, self.n_users)
item_id = random.randint(0, self.n_items)
keys = user_id * offset + item_id
if keys not in pos_interaction_table:
neg_interaction_table[keys] = 0.
neg_num += 1
interaction_table = {**pos_interaction_table, **neg_interaction_table}
return interaction_table
def construct_adj(self, kg_graph):
r"""Get neighbors and corresponding relations for each entity in the KG.
Args:
kg_graph(scipy.sparse.coo_matrix): an undirected graph
Returns:
tuple:
- adj_entity (torch.LongTensor): each line stores the sampled neighbor entities for a given entity,
shape: [n_entities, neighbor_sample_size]
- adj_relation (torch.LongTensor): each line stores the corresponding sampled neighbor relations,
shape: [n_entities, neighbor_sample_size]
"""
# self.logger.info('constructing knowledge graph ...')
# treat the KG as an undirected graph
kg_dict = dict()
for triple in zip(kg_graph.row, kg_graph.data, kg_graph.col):
head = triple[0]
relation = triple[1]
tail = triple[2]
if head not in kg_dict:
kg_dict[head] = []
kg_dict[head].append((tail, relation))
if tail not in kg_dict:
kg_dict[tail] = []
kg_dict[tail].append((head, relation))
# self.logger.info('constructing adjacency matrix ...')
# each line of adj_entity stores the sampled neighbor entities for a given entity
# each line of adj_relation stores the corresponding sampled neighbor relations
entity_num = kg_graph.shape[0]
adj_entity = np.zeros(
[entity_num, self.neighbor_sample_size], dtype=np.int64)
adj_relation = np.zeros(
[entity_num, self.neighbor_sample_size], dtype=np.int64)
for entity in range(entity_num):
if entity not in kg_dict.keys():
adj_entity[entity] = np.array(
[entity] * self.neighbor_sample_size)
adj_relation[entity] = np.array(
[0] * self.neighbor_sample_size)
continue
neighbors = kg_dict[entity]
n_neighbors = len(neighbors)
if n_neighbors >= self.neighbor_sample_size:
sampled_indices = np.random.choice(list(range(n_neighbors)), size=self.neighbor_sample_size,
replace=False)
else:
sampled_indices = np.random.choice(list(range(n_neighbors)), size=self.neighbor_sample_size,
replace=True)
adj_entity[entity] = np.array(
[neighbors[i][0] for i in sampled_indices])
adj_relation[entity] = np.array(
[neighbors[i][1] for i in sampled_indices])
return torch.from_numpy(adj_entity), torch.from_numpy(adj_relation)
def get_neighbors(self, items):
r"""Get neighbors and corresponding relations for each entity in items from adj_entity and adj_relation.
Args:
items(torch.LongTensor): The input tensor that contains item's id, shape: [batch_size, ]
Returns:
tuple:
- entities(list): Entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
dimensions of entities: {[batch_size, 1],
[batch_size, n_neighbor],
[batch_size, n_neighbor^2],
...,
[batch_size, n_neighbor^n_iter]}
- relations(list): Relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for
entities. Relations have the same shape as entities.
"""
items = torch.unsqueeze(items, dim=1)
entities = [items]
relations = []
for i in range(self.n_iter):
index = torch.flatten(entities[i])
neighbor_entities = torch.reshape(torch.index_select(
self.adj_entity, 0, index), (self.batch_size, -1))
neighbor_relations = torch.reshape(torch.index_select(
self.adj_relation, 0, index), (self.batch_size, -1))
entities.append(neighbor_entities)
relations.append(neighbor_relations)
return entities, relations
def aggregate(self, user_embeddings, entities, relations):
r"""For each item, aggregate the entity representation and its neighborhood representation into a single vector.
Args:
user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size, embedding_size]
entities(list): entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
dimensions of entities: {[batch_size, 1],
[batch_size, n_neighbor],
[batch_size, n_neighbor^2],
...,
[batch_size, n_neighbor^n_iter]}
relations(list): relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for entities.
relations have the same shape as entities.
Returns:
item_embeddings(torch.FloatTensor): The embeddings of items, shape: [batch_size, embedding_size]
"""
entity_vectors = [self.entity_embedding(i) for i in entities]
relation_vectors = [self.relation_embedding(i) for i in relations]
for i in range(self.n_iter):
entity_vectors_next_iter = []
for hop in range(self.n_iter - i):
shape = (self.batch_size, -1,
self.neighbor_sample_size, self.embedding_size)
self_vectors = entity_vectors[hop]
neighbor_vectors = torch.reshape(
entity_vectors[hop + 1], shape)
neighbor_relations = torch.reshape(
relation_vectors[hop], shape)
# mix_neighbor_vectors
user_embeddings = torch.reshape(user_embeddings,
(self.batch_size, 1, 1, self.embedding_size)) # [batch_size, 1, 1, dim]
user_relation_scores = torch.mean(user_embeddings * neighbor_relations,
dim=-1) # [batch_size, -1, n_neighbor]
user_relation_scores_normalized = torch.unsqueeze(self.softmax(user_relation_scores),
dim=-1) # [batch_size, -1, n_neighbor, 1]
neighbors_agg = torch.mean(user_relation_scores_normalized * neighbor_vectors,
dim=2) # [batch_size, -1, dim]
if self.aggregator_class == 'sum':
output = torch.reshape(
self_vectors + neighbors_agg, (-1, self.embedding_size)) # [-1, dim]
elif self.aggregator_class == 'neighbor':
output = torch.reshape(
neighbors_agg, (-1, self.embedding_size)) # [-1, dim]
elif self.aggregator_class == 'concat':
# [batch_size, -1, dim * 2]
output = torch.cat([self_vectors, neighbors_agg], dim=-1)
output = torch.reshape(
output, (-1, self.embedding_size * 2)) # [-1, dim * 2]
else:
raise Exception("Unknown aggregator: " +
self.aggregator_class)
output = self.linear_layers[i](output)
# [batch_size, -1, dim]
output = torch.reshape(
output, [self.batch_size, -1, self.embedding_size])
if i == self.n_iter - 1:
vector = self.Tanh(output)
else:
vector = self.ReLU(output)
entity_vectors_next_iter.append(vector)
entity_vectors = entity_vectors_next_iter
res = torch.reshape(
entity_vectors[0], (self.batch_size, self.embedding_size))
return res
def label_smoothness_predict(self, user_embeddings, user, entities, relations):
r"""Predict the label of items by label smoothness.
Args:
user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size*2, embedding_size],
user(torch.FloatTensor): the index of users, shape: [batch_size*2]
entities(list): entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
dimensions of entities: {[batch_size*2, 1],
[batch_size*2, n_neighbor],
[batch_size*2, n_neighbor^2],
...,
[batch_size*2, n_neighbor^n_iter]}
relations(list): relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for entities.
relations have the same shape as entities.
Returns:
predicted_labels(torch.FloatTensor): The predicted label of items, shape: [batch_size*2]
"""
# calculate initial labels; calculate updating masks for label propagation
entity_labels = []
# True means the label of this item is reset to initial value during label propagation
reset_masks = []
holdout_item_for_user = None
for entities_per_iter in entities:
users = torch.unsqueeze(user, dim=1) # [batch_size, 1]
user_entity_concat = users * self.offset + \
entities_per_iter # [batch_size, n_neighbor^i]
# the first one in entities is the items to be held out
if holdout_item_for_user is None:
holdout_item_for_user = user_entity_concat
def lookup_interaction_table(x, _):
x = int(x)
label = self.interaction_table.setdefault(x, 0.5)
return label
initial_label = user_entity_concat.clone().cpu().double()
initial_label.map_(initial_label, lookup_interaction_table)
initial_label = initial_label.float().to(self.device)
# False if the item is held out
holdout_mask = (holdout_item_for_user - user_entity_concat).bool()
# True if the entity is a labeled item
reset_mask = (initial_label - 0.5).bool()
reset_mask = torch.logical_and(
reset_mask, holdout_mask) # remove held-out items
initial_label = holdout_mask.float() * initial_label + torch.logical_not(
holdout_mask).float() * 0.5 # label initialization
reset_masks.append(reset_mask)
entity_labels.append(initial_label)
# we do not need the reset_mask for the last iteration
reset_masks = reset_masks[:-1]
# label propagation
relation_vectors = [self.relation_embedding(i) for i in relations]
for i in range(self.n_iter):
entity_labels_next_iter = []
for hop in range(self.n_iter - i):
masks = reset_masks[hop]
self_labels = entity_labels[hop]
neighbor_labels = torch.reshape(entity_labels[hop + 1],
[self.batch_size, -1, self.neighbor_sample_size])
neighbor_relations = torch.reshape(relation_vectors[hop],
[self.batch_size, -1, self.neighbor_sample_size,
self.embedding_size])
# mix_neighbor_labels
user_embeddings = torch.reshape(user_embeddings,
[self.batch_size, 1, 1, self.embedding_size]) # [batch_size, 1, 1, dim]
user_relation_scores = torch.mean(user_embeddings * neighbor_relations,
dim=-1) # [batch_size, -1, n_neighbor]
user_relation_scores_normalized = self.softmax(
user_relation_scores) # [batch_size, -1, n_neighbor]
neighbors_aggregated_label = torch.mean(user_relation_scores_normalized * neighbor_labels,
dim=2) # [batch_size, -1, dim] # [batch_size, -1]
output = masks.float() * self_labels + torch.logical_not(masks).float() * \
neighbors_aggregated_label
entity_labels_next_iter.append(output)
entity_labels = entity_labels_next_iter
predicted_labels = entity_labels[0].squeeze(-1)
return predicted_labels
def forward(self, user, item):
self.batch_size = item.shape[0]
# [batch_size, dim]
user_e = self.user_embedding(user)
# entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items. dimensions of entities:
# {[batch_size, 1], [batch_size, n_neighbor], [batch_size, n_neighbor^2], ..., [batch_size, n_neighbor^n_iter]}
entities, relations = self.get_neighbors(item)
# [batch_size, dim]
item_e = self.aggregate(user_e, entities, relations)
return user_e, item_e
def calculate_ls_loss(self, user, item, target):
r"""Calculate label smoothness loss.
Args:
user(torch.FloatTensor): the index of users, shape: [batch_size*2],
item(torch.FloatTensor): the index of items, shape: [batch_size*2],
target(torch.FloatTensor): the label of user-item, shape: [batch_size*2],
Returns:
ls_loss: label smoothness loss
"""
user_e = self.user_embedding(user)
entities, relations = self.get_neighbors(item)
predicted_labels = self.label_smoothness_predict(
user_e, user, entities, relations)
ls_loss = self.bce_loss(predicted_labels, target)
return ls_loss
def calculate_loss(self, interaction):
user = interaction[self.USER_ID]
pos_item = interaction[self.ITEM_ID]
neg_item = interaction[self.NEG_ITEM_ID]
target = torch.zeros(
len(user) * 2, dtype=torch.float32).to(self.device)
target[:len(user)] = 1
users = torch.cat((user, user))
items = torch.cat((pos_item, neg_item))
user_e, item_e = self.forward(users, items)
predict = torch.mul(user_e, item_e).sum(dim=1)
rec_loss = self.bce_loss(predict, target)
ls_loss = self.calculate_ls_loss(users, items, target)
l2_loss = self.l2_loss(user_e, item_e)
loss = rec_loss + self.ls_weight * ls_loss + self.reg_weight * l2_loss
return loss
def predict(self, interaction):
user = interaction[self.USER_ID]
item = interaction[self.ITEM_ID]
user_e, item_e = self.forward(user, item)
return torch.mul(user_e, item_e).sum(dim=1)
def full_sort_predict(self, interaction):
user_index = interaction[self.USER_ID]
item_index = torch.tensor(range(self.n_items)).to(self.device)
user = torch.unsqueeze(user_index, dim=1).repeat(
1, item_index.shape[0])
user = torch.flatten(user)
item = torch.unsqueeze(item_index, dim=0).repeat(
user_index.shape[0], 1)
item = torch.flatten(item)
user_e, item_e = self.forward(user, item)
score = torch.mul(user_e, item_e).sum(dim=1)
return score.view(-1)
|
def get_interaction_table(self, user_id, item_id, y):
r"""Get interaction_table that is used for fetching user-item interaction label in LS regularization.
Args:
user_id(torch.Tensor): the user id in user-item interactions, shape: [n_interactions, 1]
item_id(torch.Tensor): the item id in user-item interactions, shape: [n_interactions, 1]
y(torch.Tensor): the label in user-item interactions, shape: [n_interactions, 1]
Returns:
tuple:
- interaction_table(dict): key: user_id * 10^offset + item_id; value: y_{user_id, item_id}
- offset(int): The offset that is used for calculating the key(index) in interaction_table
"""
offset = len(str(self.n_entities))
offset = 10 ** offset
keys = user_id * offset + item_id
keys = keys.int().cpu().numpy().tolist()
values = y.float().cpu().numpy().tolist()
interaction_table = dict(zip(keys, values))
return interaction_table, offset
| 90 | 110 |
# -*- coding: utf-8 -*-
# @Time : 2020/10/3
# @Author : Changxin Tian
# @Email : [email protected]
r"""
KGNNLS
################################################
Reference:
Hongwei Wang et al. "Knowledge-aware Graph Neural Networks with Label Smoothness Regularization
for Recommender Systems." in KDD 2019.
Reference code:
https://github.com/hwwang55/KGNN-LS
"""
import torch
import torch.nn as nn
import numpy as np
import random
from recbole.utils import InputType
from recbole.model.abstract_recommender import KnowledgeRecommender
from recbole.model.loss import BPRLoss, EmbLoss
from recbole.model.init import xavier_normal_initialization
class KGNNLS(KnowledgeRecommender):
r"""KGNN-LS is a knowledge-based recommendation model.
KGNN-LS transforms the knowledge graph into a user-specific weighted graph and then applies a graph neural network to
compute personalized item embeddings. To provide a better inductive bias, KGNN-LS relies on the label smoothness
assumption, which posits that adjacent items in the knowledge graph are likely to have similar user relevance
labels/scores. Label smoothness provides regularization over the edge weights and is equivalent to a label
propagation scheme on a graph.
"""
input_type = InputType.PAIRWISE
def __init__(self, config, dataset):
super(KGNNLS, self).__init__(config, dataset)
# load parameters info
self.embedding_size = config['embedding_size']
self.neighbor_sample_size = config['neighbor_sample_size']
self.aggregator_class = config['aggregator'] # which aggregator to use
# number of iterations when computing entity representation
self.n_iter = config['n_iter']
self.reg_weight = config['reg_weight'] # weight of l2 regularization
# weight of label Smoothness regularization
self.ls_weight = config['ls_weight']
# define embedding
self.user_embedding = nn.Embedding(self.n_users, self.embedding_size)
self.entity_embedding = nn.Embedding(
self.n_entities, self.embedding_size)
self.relation_embedding = nn.Embedding(
self.n_relations + 1, self.embedding_size)
# sample neighbors and construct interaction table
kg_graph = dataset.kg_graph(form='coo', value_field='relation_id')
adj_entity, adj_relation = self.construct_adj(kg_graph)
self.adj_entity, self.adj_relation = adj_entity.to(
self.device), adj_relation.to(self.device)
inter_feat = dataset.dataset.inter_feat.values
pos_users = torch.from_numpy(inter_feat[:, 0])
pos_items = torch.from_numpy(inter_feat[:, 1])
pos_label = torch.ones(pos_items.shape)
pos_interaction_table, self.offset = self.get_interaction_table(
pos_users, pos_items, pos_label)
self.interaction_table = self.sample_neg_interaction(
pos_interaction_table, self.offset)
# define function
self.softmax = nn.Softmax(dim=-1)
self.linear_layers = torch.nn.ModuleList()
for i in range(self.n_iter):
self.linear_layers.append(nn.Linear(
self.embedding_size if not self.aggregator_class == 'concat' else self.embedding_size * 2,
self.embedding_size))
self.ReLU = nn.ReLU()
self.Tanh = nn.Tanh()
self.bce_loss = nn.BCEWithLogitsLoss()
self.l2_loss = EmbLoss()
# parameters initialization
self.apply(xavier_normal_initialization)
def get_interaction_table(self, user_id, item_id, y):
r"""Get interaction_table that is used for fetching user-item interaction label in LS regularization.
Args:
user_id(torch.Tensor): the user id in user-item interactions, shape: [n_interactions, 1]
item_id(torch.Tensor): the item id in user-item interactions, shape: [n_interactions, 1]
y(torch.Tensor): the label in user-item interactions, shape: [n_interactions, 1]
Returns:
tuple:
- interaction_table(dict): key: user_id * 10^offset + item_id; value: y_{user_id, item_id}
- offset(int): The offset that is used for calculating the key(index) in interaction_table
"""
offset = len(str(self.n_entities))
offset = 10 ** offset
keys = user_id * offset + item_id
keys = keys.int().cpu().numpy().tolist()
values = y.float().cpu().numpy().tolist()
interaction_table = dict(zip(keys, values))
return interaction_table, offset
def sample_neg_interaction(self, pos_interaction_table, offset):
r"""Sample neg_interaction to construct train data.
Args:
pos_interaction_table(dict): the interaction_table that only contains pos_interaction.
offset(int): The offset that is used for calculating the key(index) in interaction_table
Returns:
interaction_table(dict): key: user_id * 10^offset + item_id; value: y_{user_id, item_id}
"""
pos_num = len(pos_interaction_table)
neg_num = 0
neg_interaction_table = {}
while neg_num < pos_num:
user_id = random.randint(0, self.n_users)
item_id = random.randint(0, self.n_items)
keys = user_id * offset + item_id
if keys not in pos_interaction_table:
neg_interaction_table[keys] = 0.
neg_num += 1
interaction_table = {**pos_interaction_table, **neg_interaction_table}
return interaction_table
def construct_adj(self, kg_graph):
r"""Get neighbors and corresponding relations for each entity in the KG.
Args:
kg_graph(scipy.sparse.coo_matrix): an undirected graph
Returns:
tuple:
- adj_entity (torch.LongTensor): each line stores the sampled neighbor entities for a given entity,
shape: [n_entities, neighbor_sample_size]
- adj_relation (torch.LongTensor): each line stores the corresponding sampled neighbor relations,
shape: [n_entities, neighbor_sample_size]
"""
# self.logger.info('constructing knowledge graph ...')
# treat the KG as an undirected graph
kg_dict = dict()
for triple in zip(kg_graph.row, kg_graph.data, kg_graph.col):
head = triple[0]
relation = triple[1]
tail = triple[2]
if head not in kg_dict:
kg_dict[head] = []
kg_dict[head].append((tail, relation))
if tail not in kg_dict:
kg_dict[tail] = []
kg_dict[tail].append((head, relation))
# self.logger.info('constructing adjacency matrix ...')
# each line of adj_entity stores the sampled neighbor entities for a given entity
# each line of adj_relation stores the corresponding sampled neighbor relations
entity_num = kg_graph.shape[0]
adj_entity = np.zeros(
[entity_num, self.neighbor_sample_size], dtype=np.int64)
adj_relation = np.zeros(
[entity_num, self.neighbor_sample_size], dtype=np.int64)
for entity in range(entity_num):
if entity not in kg_dict.keys():
adj_entity[entity] = np.array(
[entity] * self.neighbor_sample_size)
adj_relation[entity] = np.array(
[0] * self.neighbor_sample_size)
continue
neighbors = kg_dict[entity]
n_neighbors = len(neighbors)
if n_neighbors >= self.neighbor_sample_size:
sampled_indices = np.random.choice(list(range(n_neighbors)), size=self.neighbor_sample_size,
replace=False)
else:
sampled_indices = np.random.choice(list(range(n_neighbors)), size=self.neighbor_sample_size,
replace=True)
adj_entity[entity] = np.array(
[neighbors[i][0] for i in sampled_indices])
adj_relation[entity] = np.array(
[neighbors[i][1] for i in sampled_indices])
return torch.from_numpy(adj_entity), torch.from_numpy(adj_relation)
def get_neighbors(self, items):
r"""Get neighbors and corresponding relations for each entity in items from adj_entity and adj_relation.
Args:
items(torch.LongTensor): The input tensor that contains item's id, shape: [batch_size, ]
Returns:
tuple:
- entities(list): Entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
dimensions of entities: {[batch_size, 1],
[batch_size, n_neighbor],
[batch_size, n_neighbor^2],
...,
[batch_size, n_neighbor^n_iter]}
- relations(list): Relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for
entities. Relations have the same shape as entities.
"""
items = torch.unsqueeze(items, dim=1)
entities = [items]
relations = []
for i in range(self.n_iter):
index = torch.flatten(entities[i])
neighbor_entities = torch.reshape(torch.index_select(
self.adj_entity, 0, index), (self.batch_size, -1))
neighbor_relations = torch.reshape(torch.index_select(
self.adj_relation, 0, index), (self.batch_size, -1))
entities.append(neighbor_entities)
relations.append(neighbor_relations)
return entities, relations
def aggregate(self, user_embeddings, entities, relations):
r"""For each item, aggregate the entity representation and its neighborhood representation into a single vector.
Args:
user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size, embedding_size]
entities(list): entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
dimensions of entities: {[batch_size, 1],
[batch_size, n_neighbor],
[batch_size, n_neighbor^2],
...,
[batch_size, n_neighbor^n_iter]}
relations(list): relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for entities.
relations have the same shape as entities.
Returns:
item_embeddings(torch.FloatTensor): The embeddings of items, shape: [batch_size, embedding_size]
"""
entity_vectors = [self.entity_embedding(i) for i in entities]
relation_vectors = [self.relation_embedding(i) for i in relations]
for i in range(self.n_iter):
entity_vectors_next_iter = []
for hop in range(self.n_iter - i):
shape = (self.batch_size, -1,
self.neighbor_sample_size, self.embedding_size)
self_vectors = entity_vectors[hop]
neighbor_vectors = torch.reshape(
entity_vectors[hop + 1], shape)
neighbor_relations = torch.reshape(
relation_vectors[hop], shape)
# mix_neighbor_vectors
user_embeddings = torch.reshape(user_embeddings,
(self.batch_size, 1, 1, self.embedding_size)) # [batch_size, 1, 1, dim]
user_relation_scores = torch.mean(user_embeddings * neighbor_relations,
dim=-1) # [batch_size, -1, n_neighbor]
user_relation_scores_normalized = torch.unsqueeze(self.softmax(user_relation_scores),
dim=-1) # [batch_size, -1, n_neighbor, 1]
neighbors_agg = torch.mean(user_relation_scores_normalized * neighbor_vectors,
dim=2) # [batch_size, -1, dim]
if self.aggregator_class == 'sum':
output = torch.reshape(
self_vectors + neighbors_agg, (-1, self.embedding_size)) # [-1, dim]
elif self.aggregator_class == 'neighbor':
output = torch.reshape(
neighbors_agg, (-1, self.embedding_size)) # [-1, dim]
elif self.aggregator_class == 'concat':
# [batch_size, -1, dim * 2]
output = torch.cat([self_vectors, neighbors_agg], dim=-1)
output = torch.reshape(
output, (-1, self.embedding_size * 2)) # [-1, dim * 2]
else:
raise Exception("Unknown aggregator: " +
self.aggregator_class)
output = self.linear_layers[i](output)
# [batch_size, -1, dim]
output = torch.reshape(
output, [self.batch_size, -1, self.embedding_size])
if i == self.n_iter - 1:
vector = self.Tanh(output)
else:
vector = self.ReLU(output)
entity_vectors_next_iter.append(vector)
entity_vectors = entity_vectors_next_iter
res = torch.reshape(
entity_vectors[0], (self.batch_size, self.embedding_size))
return res
def label_smoothness_predict(self, user_embeddings, user, entities, relations):
r"""Predict the label of items by label smoothness.
Args:
user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size*2, embedding_size],
user(torch.FloatTensor): the index of users, shape: [batch_size*2]
entities(list): entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
dimensions of entities: {[batch_size*2, 1],
[batch_size*2, n_neighbor],
[batch_size*2, n_neighbor^2],
...,
[batch_size*2, n_neighbor^n_iter]}
relations(list): relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for entities.
relations have the same shape as entities.
Returns:
predicted_labels(torch.FloatTensor): The predicted label of items, shape: [batch_size*2]
"""
# calculate initial labels; calculate updating masks for label propagation
entity_labels = []
# True means the label of this item is reset to initial value during label propagation
reset_masks = []
holdout_item_for_user = None
for entities_per_iter in entities:
users = torch.unsqueeze(user, dim=1) # [batch_size, 1]
user_entity_concat = users * self.offset + \
entities_per_iter # [batch_size, n_neighbor^i]
# the first one in entities is the items to be held out
if holdout_item_for_user is None:
holdout_item_for_user = user_entity_concat
def lookup_interaction_table(x, _):
x = int(x)
label = self.interaction_table.setdefault(x, 0.5)
return label
initial_label = user_entity_concat.clone().cpu().double()
initial_label.map_(initial_label, lookup_interaction_table)
initial_label = initial_label.float().to(self.device)
# False if the item is held out
holdout_mask = (holdout_item_for_user - user_entity_concat).bool()
# True if the entity is a labeled item
reset_mask = (initial_label - 0.5).bool()
reset_mask = torch.logical_and(
reset_mask, holdout_mask) # remove held-out items
initial_label = holdout_mask.float() * initial_label + torch.logical_not(
holdout_mask).float() * 0.5 # label initialization
reset_masks.append(reset_mask)
entity_labels.append(initial_label)
# we do not need the reset_mask for the last iteration
reset_masks = reset_masks[:-1]
# label propagation
relation_vectors = [self.relation_embedding(i) for i in relations]
for i in range(self.n_iter):
entity_labels_next_iter = []
for hop in range(self.n_iter - i):
masks = reset_masks[hop]
self_labels = entity_labels[hop]
neighbor_labels = torch.reshape(entity_labels[hop + 1],
[self.batch_size, -1, self.neighbor_sample_size])
neighbor_relations = torch.reshape(relation_vectors[hop],
[self.batch_size, -1, self.neighbor_sample_size,
self.embedding_size])
# mix_neighbor_labels
user_embeddings = torch.reshape(user_embeddings,
[self.batch_size, 1, 1, self.embedding_size]) # [batch_size, 1, 1, dim]
user_relation_scores = torch.mean(user_embeddings * neighbor_relations,
dim=-1) # [batch_size, -1, n_neighbor]
user_relation_scores_normalized = self.softmax(
user_relation_scores) # [batch_size, -1, n_neighbor]
neighbors_aggregated_label = torch.mean(user_relation_scores_normalized * neighbor_labels,
dim=2) # [batch_size, -1, dim] # [batch_size, -1]
output = masks.float() * self_labels + torch.logical_not(masks).float() * \
neighbors_aggregated_label
entity_labels_next_iter.append(output)
entity_labels = entity_labels_next_iter
predicted_labels = entity_labels[0].squeeze(-1)
return predicted_labels
def forward(self, user, item):
self.batch_size = item.shape[0]
# [batch_size, dim]
user_e = self.user_embedding(user)
# entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items. dimensions of entities:
# {[batch_size, 1], [batch_size, n_neighbor], [batch_size, n_neighbor^2], ..., [batch_size, n_neighbor^n_iter]}
entities, relations = self.get_neighbors(item)
# [batch_size, dim]
item_e = self.aggregate(user_e, entities, relations)
return user_e, item_e
def calculate_ls_loss(self, user, item, target):
r"""Calculate label smoothness loss.
Args:
user(torch.FloatTensor): the index of users, shape: [batch_size*2],
item(torch.FloatTensor): the index of items, shape: [batch_size*2],
target(torch.FloatTensor): the label of user-item, shape: [batch_size*2],
Returns:
ls_loss: label smoothness loss
"""
user_e = self.user_embedding(user)
entities, relations = self.get_neighbors(item)
predicted_labels = self.label_smoothness_predict(
user_e, user, entities, relations)
ls_loss = self.bce_loss(predicted_labels, target)
return ls_loss
def calculate_loss(self, interaction):
user = interaction[self.USER_ID]
pos_item = interaction[self.ITEM_ID]
neg_item = interaction[self.NEG_ITEM_ID]
target = torch.zeros(
len(user) * 2, dtype=torch.float32).to(self.device)
target[:len(user)] = 1
users = torch.cat((user, user))
items = torch.cat((pos_item, neg_item))
user_e, item_e = self.forward(users, items)
predict = torch.mul(user_e, item_e).sum(dim=1)
rec_loss = self.bce_loss(predict, target)
ls_loss = self.calculate_ls_loss(users, items, target)
l2_loss = self.l2_loss(user_e, item_e)
loss = rec_loss + self.ls_weight * ls_loss + self.reg_weight * l2_loss
return loss
def predict(self, interaction):
user = interaction[self.USER_ID]
item = interaction[self.ITEM_ID]
user_e, item_e = self.forward(user, item)
return torch.mul(user_e, item_e).sum(dim=1)
def full_sort_predict(self, interaction):
user_index = interaction[self.USER_ID]
item_index = torch.tensor(range(self.n_items)).to(self.device)
user = torch.unsqueeze(user_index, dim=1).repeat(
1, item_index.shape[0])
user = torch.flatten(user)
item = torch.unsqueeze(item_index, dim=0).repeat(
user_index.shape[0], 1)
item = torch.flatten(item)
user_e, item_e = self.forward(user, item)
score = torch.mul(user_e, item_e).sum(dim=1)
return score.view(-1)
|
sample_neg_interaction
|
Sample neg_interaction to construct train data.
Args:
pos_interaction_table(dict): the interaction_table that only contains pos_interaction.
offset(int): The offset that is used for calculating the key(index) in interaction_table
Returns:
interaction_table(dict): key: user_id * 10^offset + item_id; value: y_{user_id, item_id}
|
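A minimal standalone sketch of the negative sampling this docstring describes, with hypothetical counts (the KGNNLS method shown earlier draws ids with random.randint(0, self.n_users) and random.randint(0, self.n_items) and pairs them with the offset returned by get_interaction_table): random (user, item) keys are drawn and kept only when they do not collide with a positive key, until as many negatives as positives have been collected.
import random

def sample_neg_sketch(pos_interaction_table, offset, n_users, n_items):
    neg_interaction_table = {}
    neg_num = 0
    while neg_num < len(pos_interaction_table):
        # random.randint is inclusive of both endpoints, mirroring sample_neg_interaction
        key = random.randint(0, n_users) * offset + random.randint(0, n_items)
        if key not in pos_interaction_table:  # rejection step: keep unobserved pairs only
            neg_interaction_table[key] = 0.
            neg_num += 1
    return {**pos_interaction_table, **neg_interaction_table}

# e.g. building on the toy table from the earlier key-encoding sketch:
# sample_neg_sketch({70055: 1.0, 70056: 1.0, 90055: 1.0}, offset=10000, n_users=10, n_items=100)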
# -*- coding: utf-8 -*-
# @Time : 2020/10/3
# @Author : Changxin Tian
# @Email : [email protected]
r"""
KGNNLS
################################################
Reference:
Hongwei Wang et al. "Knowledge-aware Graph Neural Networks with Label Smoothness Regularization
for Recommender Systems." in KDD 2019.
Reference code:
https://github.com/hwwang55/KGNN-LS
"""
import torch
import torch.nn as nn
import numpy as np
import random
from recbole.utils import InputType
from recbole.model.abstract_recommender import KnowledgeRecommender
from recbole.model.loss import BPRLoss, EmbLoss
from recbole.model.init import xavier_normal_initialization
class KGNNLS(KnowledgeRecommender):
r"""KGNN-LS is a knowledge-based recommendation model.
KGNN-LS transforms the knowledge graph into a user-specific weighted graph and then applies a graph neural network to
compute personalized item embeddings. To provide a better inductive bias, KGNN-LS relies on the label smoothness
assumption, which posits that adjacent items in the knowledge graph are likely to have similar user relevance
labels/scores. Label smoothness provides regularization over the edge weights and is equivalent to a label
propagation scheme on a graph.
"""
input_type = InputType.PAIRWISE
def __init__(self, config, dataset):
super(KGNNLS, self).__init__(config, dataset)
# load parameters info
self.embedding_size = config['embedding_size']
self.neighbor_sample_size = config['neighbor_sample_size']
self.aggregator_class = config['aggregator'] # which aggregator to use
# number of iterations when computing entity representation
self.n_iter = config['n_iter']
self.reg_weight = config['reg_weight'] # weight of l2 regularization
# weight of label Smoothness regularization
self.ls_weight = config['ls_weight']
# define embedding
self.user_embedding = nn.Embedding(self.n_users, self.embedding_size)
self.entity_embedding = nn.Embedding(
self.n_entities, self.embedding_size)
self.relation_embedding = nn.Embedding(
self.n_relations + 1, self.embedding_size)
# sample neighbors and construct interaction table
kg_graph = dataset.kg_graph(form='coo', value_field='relation_id')
adj_entity, adj_relation = self.construct_adj(kg_graph)
self.adj_entity, self.adj_relation = adj_entity.to(
self.device), adj_relation.to(self.device)
inter_feat = dataset.dataset.inter_feat.values
pos_users = torch.from_numpy(inter_feat[:, 0])
pos_items = torch.from_numpy(inter_feat[:, 1])
pos_label = torch.ones(pos_items.shape)
pos_interaction_table, self.offset = self.get_interaction_table(
pos_users, pos_items, pos_label)
self.interaction_table = self.sample_neg_interaction(
pos_interaction_table, self.offset)
# define function
self.softmax = nn.Softmax(dim=-1)
self.linear_layers = torch.nn.ModuleList()
for i in range(self.n_iter):
self.linear_layers.append(nn.Linear(
self.embedding_size if not self.aggregator_class == 'concat' else self.embedding_size * 2,
self.embedding_size))
self.ReLU = nn.ReLU()
self.Tanh = nn.Tanh()
self.bce_loss = nn.BCEWithLogitsLoss()
self.l2_loss = EmbLoss()
# parameters initialization
self.apply(xavier_normal_initialization)
def get_interaction_table(self, user_id, item_id, y):
r"""Get interaction_table that is used for fetching user-item interaction label in LS regularization.
Args:
user_id(torch.Tensor): the user id in user-item interactions, shape: [n_interactions, 1]
item_id(torch.Tensor): the item id in user-item interactions, shape: [n_interactions, 1]
y(torch.Tensor): the label in user-item interactions, shape: [n_interactions, 1]
Returns:
tuple:
- interaction_table(dict): key: user_id * 10^offset + item_id; value: y_{user_id, item_id}
- offset(int): The offset that is used for calculating the key(index) in interaction_table
"""
offset = len(str(self.n_entities))
offset = 10 ** offset
keys = user_id * offset + item_id
keys = keys.int().cpu().numpy().tolist()
values = y.float().cpu().numpy().tolist()
interaction_table = dict(zip(keys, values))
return interaction_table, offset
# MASKED: sample_neg_interaction function (lines 112-133)
def construct_adj(self, kg_graph):
r"""Get neighbors and corresponding relations for each entity in the KG.
Args:
kg_graph(scipy.sparse.coo_matrix): an undirected graph
Returns:
tuple:
- adj_entity (torch.LongTensor): each line stores the sampled neighbor entities for a given entity,
shape: [n_entities, neighbor_sample_size]
- adj_relation (torch.LongTensor): each line stores the corresponding sampled neighbor relations,
shape: [n_entities, neighbor_sample_size]
"""
# self.logger.info('constructing knowledge graph ...')
# treat the KG as an undirected graph
kg_dict = dict()
for triple in zip(kg_graph.row, kg_graph.data, kg_graph.col):
head = triple[0]
relation = triple[1]
tail = triple[2]
if head not in kg_dict:
kg_dict[head] = []
kg_dict[head].append((tail, relation))
if tail not in kg_dict:
kg_dict[tail] = []
kg_dict[tail].append((head, relation))
# self.logger.info('constructing adjacency matrix ...')
# each line of adj_entity stores the sampled neighbor entities for a given entity
# each line of adj_relation stores the corresponding sampled neighbor relations
entity_num = kg_graph.shape[0]
adj_entity = np.zeros(
[entity_num, self.neighbor_sample_size], dtype=np.int64)
adj_relation = np.zeros(
[entity_num, self.neighbor_sample_size], dtype=np.int64)
for entity in range(entity_num):
if entity not in kg_dict.keys():
adj_entity[entity] = np.array(
[entity] * self.neighbor_sample_size)
adj_relation[entity] = np.array(
[0] * self.neighbor_sample_size)
continue
neighbors = kg_dict[entity]
n_neighbors = len(neighbors)
if n_neighbors >= self.neighbor_sample_size:
sampled_indices = np.random.choice(list(range(n_neighbors)), size=self.neighbor_sample_size,
replace=False)
else:
sampled_indices = np.random.choice(list(range(n_neighbors)), size=self.neighbor_sample_size,
replace=True)
adj_entity[entity] = np.array(
[neighbors[i][0] for i in sampled_indices])
adj_relation[entity] = np.array(
[neighbors[i][1] for i in sampled_indices])
return torch.from_numpy(adj_entity), torch.from_numpy(adj_relation)
def get_neighbors(self, items):
r"""Get neighbors and corresponding relations for each entity in items from adj_entity and adj_relation.
Args:
items(torch.LongTensor): The input tensor that contains item's id, shape: [batch_size, ]
Returns:
tuple:
- entities(list): Entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
dimensions of entities: {[batch_size, 1],
[batch_size, n_neighbor],
[batch_size, n_neighbor^2],
...,
[batch_size, n_neighbor^n_iter]}
- relations(list): Relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for
entities. Relations have the same shape as entities.
"""
items = torch.unsqueeze(items, dim=1)
entities = [items]
relations = []
for i in range(self.n_iter):
index = torch.flatten(entities[i])
neighbor_entities = torch.reshape(torch.index_select(
self.adj_entity, 0, index), (self.batch_size, -1))
neighbor_relations = torch.reshape(torch.index_select(
self.adj_relation, 0, index), (self.batch_size, -1))
entities.append(neighbor_entities)
relations.append(neighbor_relations)
return entities, relations
def aggregate(self, user_embeddings, entities, relations):
r"""For each item, aggregate the entity representation and its neighborhood representation into a single vector.
Args:
user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size, embedding_size]
entities(list): entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
dimensions of entities: {[batch_size, 1],
[batch_size, n_neighbor],
[batch_size, n_neighbor^2],
...,
[batch_size, n_neighbor^n_iter]}
relations(list): relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for entities.
relations have the same shape as entities.
Returns:
item_embeddings(torch.FloatTensor): The embeddings of items, shape: [batch_size, embedding_size]
"""
entity_vectors = [self.entity_embedding(i) for i in entities]
relation_vectors = [self.relation_embedding(i) for i in relations]
for i in range(self.n_iter):
entity_vectors_next_iter = []
for hop in range(self.n_iter - i):
shape = (self.batch_size, -1,
self.neighbor_sample_size, self.embedding_size)
self_vectors = entity_vectors[hop]
neighbor_vectors = torch.reshape(
entity_vectors[hop + 1], shape)
neighbor_relations = torch.reshape(
relation_vectors[hop], shape)
# mix_neighbor_vectors
user_embeddings = torch.reshape(user_embeddings,
(self.batch_size, 1, 1, self.embedding_size)) # [batch_size, 1, 1, dim]
user_relation_scores = torch.mean(user_embeddings * neighbor_relations,
dim=-1) # [batch_size, -1, n_neighbor]
user_relation_scores_normalized = torch.unsqueeze(self.softmax(user_relation_scores),
dim=-1) # [batch_size, -1, n_neighbor, 1]
neighbors_agg = torch.mean(user_relation_scores_normalized * neighbor_vectors,
dim=2) # [batch_size, -1, dim]
if self.aggregator_class == 'sum':
output = torch.reshape(
self_vectors + neighbors_agg, (-1, self.embedding_size)) # [-1, dim]
elif self.aggregator_class == 'neighbor':
output = torch.reshape(
neighbors_agg, (-1, self.embedding_size)) # [-1, dim]
elif self.aggregator_class == 'concat':
# [batch_size, -1, dim * 2]
output = torch.cat([self_vectors, neighbors_agg], dim=-1)
output = torch.reshape(
output, (-1, self.embedding_size * 2)) # [-1, dim * 2]
else:
raise Exception("Unknown aggregator: " +
self.aggregator_class)
output = self.linear_layers[i](output)
# [batch_size, -1, dim]
output = torch.reshape(
output, [self.batch_size, -1, self.embedding_size])
if i == self.n_iter - 1:
vector = self.Tanh(output)
else:
vector = self.ReLU(output)
entity_vectors_next_iter.append(vector)
entity_vectors = entity_vectors_next_iter
res = torch.reshape(
entity_vectors[0], (self.batch_size, self.embedding_size))
return res
def label_smoothness_predict(self, user_embeddings, user, entities, relations):
r"""Predict the label of items by label smoothness.
Args:
user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size*2, embedding_size],
user(torch.FloatTensor): the index of users, shape: [batch_size*2]
entities(list): entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
dimensions of entities: {[batch_size*2, 1],
[batch_size*2, n_neighbor],
[batch_size*2, n_neighbor^2],
...,
[batch_size*2, n_neighbor^n_iter]}
relations(list): relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for entities.
relations have the same shape as entities.
Returns:
predicted_labels(torch.FloatTensor): The predicted label of items, shape: [batch_size*2]
"""
# calculate initial labels; calculate updating masks for label propagation
entity_labels = []
# True means the label of this item is reset to initial value during label propagation
reset_masks = []
holdout_item_for_user = None
for entities_per_iter in entities:
users = torch.unsqueeze(user, dim=1) # [batch_size, 1]
user_entity_concat = users * self.offset + \
entities_per_iter # [batch_size, n_neighbor^i]
# the first one in entities is the items to be held out
if holdout_item_for_user is None:
holdout_item_for_user = user_entity_concat
def lookup_interaction_table(x, _):
x = int(x)
label = self.interaction_table.setdefault(x, 0.5)
return label
initial_label = user_entity_concat.clone().cpu().double()
initial_label.map_(initial_label, lookup_interaction_table)
initial_label = initial_label.float().to(self.device)
# False if the item is held out
holdout_mask = (holdout_item_for_user - user_entity_concat).bool()
# True if the entity is a labeled item
reset_mask = (initial_label - 0.5).bool()
reset_mask = torch.logical_and(
reset_mask, holdout_mask) # remove held-out items
initial_label = holdout_mask.float() * initial_label + torch.logical_not(
holdout_mask).float() * 0.5 # label initialization
reset_masks.append(reset_mask)
entity_labels.append(initial_label)
# we do not need the reset_mask for the last iteration
reset_masks = reset_masks[:-1]
# label propagation
relation_vectors = [self.relation_embedding(i) for i in relations]
for i in range(self.n_iter):
entity_labels_next_iter = []
for hop in range(self.n_iter - i):
masks = reset_masks[hop]
self_labels = entity_labels[hop]
neighbor_labels = torch.reshape(entity_labels[hop + 1],
[self.batch_size, -1, self.neighbor_sample_size])
neighbor_relations = torch.reshape(relation_vectors[hop],
[self.batch_size, -1, self.neighbor_sample_size,
self.embedding_size])
# mix_neighbor_labels
user_embeddings = torch.reshape(user_embeddings,
[self.batch_size, 1, 1, self.embedding_size]) # [batch_size, 1, 1, dim]
user_relation_scores = torch.mean(user_embeddings * neighbor_relations,
dim=-1) # [batch_size, -1, n_neighbor]
user_relation_scores_normalized = self.softmax(
user_relation_scores) # [batch_size, -1, n_neighbor]
                neighbors_aggregated_label = torch.mean(
                    user_relation_scores_normalized * neighbor_labels, dim=2)  # [batch_size, -1]
output = masks.float() * self_labels + torch.logical_not(masks).float() * \
neighbors_aggregated_label
entity_labels_next_iter.append(output)
entity_labels = entity_labels_next_iter
predicted_labels = entity_labels[0].squeeze(-1)
return predicted_labels
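# --- Illustrative sketch (not part of the original source) ---
# How the per-element dictionary lookup in label_smoothness_predict() works:
# Tensor.map_ calls a Python function for every element (CPU tensors only), so
# each encoded user-item key is replaced by its label from the interaction
# table, defaulting to 0.5 for unseen pairs. The table below is made up.
import torch
interaction_table = {1001: 1.0, 1005: 0.0}
def lookup(x, _):
    return interaction_table.setdefault(int(x), 0.5)
keys = torch.tensor([[1001.0, 1005.0, 9999.0]]).double()
labels = keys.clone()
labels.map_(keys, lookup)   # labels -> [[1.0, 0.0, 0.5]]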
def forward(self, user, item):
self.batch_size = item.shape[0]
# [batch_size, dim]
user_e = self.user_embedding(user)
# entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items. dimensions of entities:
# {[batch_size, 1], [batch_size, n_neighbor], [batch_size, n_neighbor^2], ..., [batch_size, n_neighbor^n_iter]}
entities, relations = self.get_neighbors(item)
# [batch_size, dim]
item_e = self.aggregate(user_e, entities, relations)
return user_e, item_e
def calculate_ls_loss(self, user, item, target):
r"""Calculate label smoothness loss.
Args:
user(torch.FloatTensor): the index of users, shape: [batch_size*2],
item(torch.FloatTensor): the index of items, shape: [batch_size*2],
target(torch.FloatTensor): the label of user-item, shape: [batch_size*2],
Returns:
ls_loss: label smoothness loss
"""
user_e = self.user_embedding(user)
entities, relations = self.get_neighbors(item)
predicted_labels = self.label_smoothness_predict(
user_e, user, entities, relations)
ls_loss = self.bce_loss(predicted_labels, target)
return ls_loss
def calculate_loss(self, interaction):
user = interaction[self.USER_ID]
pos_item = interaction[self.ITEM_ID]
neg_item = interaction[self.NEG_ITEM_ID]
target = torch.zeros(
len(user) * 2, dtype=torch.float32).to(self.device)
target[:len(user)] = 1
users = torch.cat((user, user))
items = torch.cat((pos_item, neg_item))
user_e, item_e = self.forward(users, items)
predict = torch.mul(user_e, item_e).sum(dim=1)
rec_loss = self.bce_loss(predict, target)
ls_loss = self.calculate_ls_loss(users, items, target)
l2_loss = self.l2_loss(user_e, item_e)
loss = rec_loss + self.ls_weight * ls_loss + self.reg_weight * l2_loss
return loss
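# --- Illustrative sketch (not part of the original source) ---
# How calculate_loss() assembles its objective: positives and negatives are
# stacked into one batch of size 2*B, targets are 1 for the first half and 0
# for the second, and the three terms are combined with the configured weights.
# The embeddings, the ls_loss placeholder, and the weights below are toy values.
import torch
import torch.nn as nn
B, dim = 3, 8
user_e, item_e = torch.randn(2 * B, dim), torch.randn(2 * B, dim)
target = torch.zeros(2 * B)
target[:B] = 1                                     # first half = positive pairs
bce = nn.BCEWithLogitsLoss()
rec_loss = bce(torch.mul(user_e, item_e).sum(dim=1), target)
ls_loss = torch.tensor(0.1)                        # stand-in for the label-smoothness term
l2_loss = user_e.norm(2) ** 2 + item_e.norm(2) ** 2  # rough stand-in for EmbLoss
loss = rec_loss + 0.5 * ls_loss + 1e-5 * l2_loss   # assuming ls_weight=0.5, reg_weight=1e-5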
def predict(self, interaction):
user = interaction[self.USER_ID]
item = interaction[self.ITEM_ID]
user_e, item_e = self.forward(user, item)
return torch.mul(user_e, item_e).sum(dim=1)
def full_sort_predict(self, interaction):
user_index = interaction[self.USER_ID]
item_index = torch.tensor(range(self.n_items)).to(self.device)
user = torch.unsqueeze(user_index, dim=1).repeat(
1, item_index.shape[0])
user = torch.flatten(user)
item = torch.unsqueeze(item_index, dim=0).repeat(
user_index.shape[0], 1)
item = torch.flatten(item)
user_e, item_e = self.forward(user, item)
score = torch.mul(user_e, item_e).sum(dim=1)
return score.view(-1)
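# --- Illustrative sketch (not part of the original source) ---
# The repeat/flatten pattern used in full_sort_predict() above: every user index
# is paired with every item index so one forward pass scores the whole item
# catalogue. Toy sizes only.
import torch
user_index = torch.tensor([0, 1])      # 2 users in the batch
item_index = torch.arange(4)           # pretend n_items == 4
user = torch.unsqueeze(user_index, dim=1).repeat(1, item_index.shape[0]).flatten()
item = torch.unsqueeze(item_index, dim=0).repeat(user_index.shape[0], 1).flatten()
# user: [0, 0, 0, 0, 1, 1, 1, 1]
# item: [0, 1, 2, 3, 0, 1, 2, 3]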
|
def sample_neg_interaction(self, pos_interaction_table, offset):
r"""Sample neg_interaction to construct train data.
Args:
pos_interaction_table(dict): the interaction_table that only contains pos_interaction.
offset(int): The offset that is used for calculating the key(index) in interaction_table
Returns:
interaction_table(dict): key: user_id * 10^offset + item_id; value: y_{user_id, item_id}
"""
pos_num = len(pos_interaction_table)
neg_num = 0
neg_interaction_table = {}
while neg_num < pos_num:
user_id = random.randint(0, self.n_users)
item_id = random.randint(0, self.n_items)
keys = user_id * offset + item_id
if keys not in pos_interaction_table:
neg_interaction_table[keys] = 0.
neg_num += 1
interaction_table = {**pos_interaction_table, **neg_interaction_table}
return interaction_table
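# --- Illustrative sketch (not part of the original source) ---
# The negative-sampling idea above on a toy table: random (user, item) pairs are
# encoded with the same offset scheme and added with label 0.0 until there are
# as many negatives as positives. The offset and id ranges below are made up.
import random
offset = 100                                     # pretend 10 ** len(str(n_entities)) == 100
pos_table = {1 * offset + 5: 1.0, 2 * offset + 7: 1.0}
neg_table = {}
while len(neg_table) < len(pos_table):
    key = random.randint(0, 3) * offset + random.randint(0, 9)
    if key not in pos_table:
        neg_table[key] = 0.0
full_table = {**pos_table, **neg_table}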
| 112 | 133 |
# -*- coding: utf-8 -*-
# @Time : 2020/10/3
# @Author : Changxin Tian
# @Email : [email protected]
r"""
KGNNLS
################################################
Reference:
Hongwei Wang et al. "Knowledge-aware Graph Neural Networks with Label Smoothness Regularization
for Recommender Systems." in KDD 2019.
Reference code:
https://github.com/hwwang55/KGNN-LS
"""
import torch
import torch.nn as nn
import numpy as np
import random
from recbole.utils import InputType
from recbole.model.abstract_recommender import KnowledgeRecommender
from recbole.model.loss import BPRLoss, EmbLoss
from recbole.model.init import xavier_normal_initialization
class KGNNLS(KnowledgeRecommender):
r"""KGNN-LS is a knowledge-based recommendation model.
    KGNN-LS transforms the knowledge graph into a user-specific weighted graph and then applies a graph neural network
    to compute personalized item embeddings. To provide a better inductive bias, KGNN-LS relies on the label smoothness
    assumption, which posits that adjacent items in the knowledge graph are likely to have similar user relevance
    labels/scores. Label smoothness provides regularization over the edge weights and is equivalent to a label
propagation scheme on a graph.
"""
input_type = InputType.PAIRWISE
def __init__(self, config, dataset):
super(KGNNLS, self).__init__(config, dataset)
# load parameters info
self.embedding_size = config['embedding_size']
self.neighbor_sample_size = config['neighbor_sample_size']
self.aggregator_class = config['aggregator'] # which aggregator to use
# number of iterations when computing entity representation
self.n_iter = config['n_iter']
self.reg_weight = config['reg_weight'] # weight of l2 regularization
# weight of label Smoothness regularization
self.ls_weight = config['ls_weight']
# define embedding
self.user_embedding = nn.Embedding(self.n_users, self.embedding_size)
self.entity_embedding = nn.Embedding(
self.n_entities, self.embedding_size)
self.relation_embedding = nn.Embedding(
self.n_relations + 1, self.embedding_size)
# sample neighbors and construct interaction table
kg_graph = dataset.kg_graph(form='coo', value_field='relation_id')
adj_entity, adj_relation = self.construct_adj(kg_graph)
self.adj_entity, self.adj_relation = adj_entity.to(
self.device), adj_relation.to(self.device)
inter_feat = dataset.dataset.inter_feat.values
pos_users = torch.from_numpy(inter_feat[:, 0])
pos_items = torch.from_numpy(inter_feat[:, 1])
pos_label = torch.ones(pos_items.shape)
pos_interaction_table, self.offset = self.get_interaction_table(
pos_users, pos_items, pos_label)
self.interaction_table = self.sample_neg_interaction(
pos_interaction_table, self.offset)
# define function
self.softmax = nn.Softmax(dim=-1)
self.linear_layers = torch.nn.ModuleList()
for i in range(self.n_iter):
self.linear_layers.append(nn.Linear(
self.embedding_size if not self.aggregator_class == 'concat' else self.embedding_size * 2,
self.embedding_size))
self.ReLU = nn.ReLU()
self.Tanh = nn.Tanh()
self.bce_loss = nn.BCEWithLogitsLoss()
self.l2_loss = EmbLoss()
# parameters initialization
self.apply(xavier_normal_initialization)
def get_interaction_table(self, user_id, item_id, y):
r"""Get interaction_table that is used for fetching user-item interaction label in LS regularization.
Args:
user_id(torch.Tensor): the user id in user-item interactions, shape: [n_interactions, 1]
item_id(torch.Tensor): the item id in user-item interactions, shape: [n_interactions, 1]
y(torch.Tensor): the label in user-item interactions, shape: [n_interactions, 1]
Returns:
tuple:
- interaction_table(dict): key: user_id * 10^offset + item_id; value: y_{user_id, item_id}
- offset(int): The offset that is used for calculating the key(index) in interaction_table
"""
offset = len(str(self.n_entities))
offset = 10 ** offset
keys = user_id * offset + item_id
keys = keys.int().cpu().numpy().tolist()
values = y.float().cpu().numpy().tolist()
interaction_table = dict(zip(keys, values))
return interaction_table, offset
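# --- Illustrative sketch (not part of the original source) ---
# The key-encoding scheme used in get_interaction_table() above: the offset is
# the next power of ten above n_entities, so a user id and an item id can be
# packed into (and recovered from) a single integer key. Numbers are made up.
n_entities = 4321
offset = 10 ** len(str(n_entities))   # 10000
user_id, item_id = 12, 345
key = user_id * offset + item_id      # 120345
assert key // offset == user_id and key % offset == item_id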
def sample_neg_interaction(self, pos_interaction_table, offset):
r"""Sample neg_interaction to construct train data.
Args:
pos_interaction_table(dict): the interaction_table that only contains pos_interaction.
offset(int): The offset that is used for calculating the key(index) in interaction_table
Returns:
interaction_table(dict): key: user_id * 10^offset + item_id; value: y_{user_id, item_id}
"""
pos_num = len(pos_interaction_table)
neg_num = 0
neg_interaction_table = {}
while neg_num < pos_num:
user_id = random.randint(0, self.n_users)
item_id = random.randint(0, self.n_items)
keys = user_id * offset + item_id
if keys not in pos_interaction_table:
neg_interaction_table[keys] = 0.
neg_num += 1
interaction_table = {**pos_interaction_table, **neg_interaction_table}
return interaction_table
def construct_adj(self, kg_graph):
r"""Get neighbors and corresponding relations for each entity in the KG.
Args:
kg_graph(scipy.sparse.coo_matrix): an undirected graph
Returns:
tuple:
- adj_entity (torch.LongTensor): each line stores the sampled neighbor entities for a given entity,
shape: [n_entities, neighbor_sample_size]
- adj_relation (torch.LongTensor): each line stores the corresponding sampled neighbor relations,
shape: [n_entities, neighbor_sample_size]
"""
# self.logger.info('constructing knowledge graph ...')
# treat the KG as an undirected graph
kg_dict = dict()
for triple in zip(kg_graph.row, kg_graph.data, kg_graph.col):
head = triple[0]
relation = triple[1]
tail = triple[2]
if head not in kg_dict:
kg_dict[head] = []
kg_dict[head].append((tail, relation))
if tail not in kg_dict:
kg_dict[tail] = []
kg_dict[tail].append((head, relation))
# self.logger.info('constructing adjacency matrix ...')
# each line of adj_entity stores the sampled neighbor entities for a given entity
# each line of adj_relation stores the corresponding sampled neighbor relations
entity_num = kg_graph.shape[0]
adj_entity = np.zeros(
[entity_num, self.neighbor_sample_size], dtype=np.int64)
adj_relation = np.zeros(
[entity_num, self.neighbor_sample_size], dtype=np.int64)
for entity in range(entity_num):
if entity not in kg_dict.keys():
adj_entity[entity] = np.array(
[entity] * self.neighbor_sample_size)
adj_relation[entity] = np.array(
[0] * self.neighbor_sample_size)
continue
neighbors = kg_dict[entity]
n_neighbors = len(neighbors)
if n_neighbors >= self.neighbor_sample_size:
sampled_indices = np.random.choice(list(range(n_neighbors)), size=self.neighbor_sample_size,
replace=False)
else:
sampled_indices = np.random.choice(list(range(n_neighbors)), size=self.neighbor_sample_size,
replace=True)
adj_entity[entity] = np.array(
[neighbors[i][0] for i in sampled_indices])
adj_relation[entity] = np.array(
[neighbors[i][1] for i in sampled_indices])
return torch.from_numpy(adj_entity), torch.from_numpy(adj_relation)
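# --- Illustrative sketch (not part of the original source) ---
# The fixed-size neighbor sampling used in construct_adj() above: entities with
# enough neighbors are sampled without replacement, the rest with replacement,
# so every row of the adjacency tensors has exactly neighbor_sample_size
# entries. The neighbor list below is made up.
import numpy as np
neighbor_sample_size = 4
neighbors = [(10, 1), (11, 2), (12, 1)]   # (tail_entity, relation) pairs, only 3 of them
n = len(neighbors)
idx = np.random.choice(n, size=neighbor_sample_size, replace=(n < neighbor_sample_size))
adj_entity_row = np.array([neighbors[i][0] for i in idx])
adj_relation_row = np.array([neighbors[i][1] for i in idx])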
def get_neighbors(self, items):
r"""Get neighbors and corresponding relations for each entity in items from adj_entity and adj_relation.
Args:
items(torch.LongTensor): The input tensor that contains item's id, shape: [batch_size, ]
Returns:
tuple:
- entities(list): Entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
dimensions of entities: {[batch_size, 1],
[batch_size, n_neighbor],
[batch_size, n_neighbor^2],
...,
[batch_size, n_neighbor^n_iter]}
- relations(list): Relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for
entities. Relations have the same shape as entities.
"""
items = torch.unsqueeze(items, dim=1)
entities = [items]
relations = []
for i in range(self.n_iter):
index = torch.flatten(entities[i])
neighbor_entities = torch.reshape(torch.index_select(
self.adj_entity, 0, index), (self.batch_size, -1))
neighbor_relations = torch.reshape(torch.index_select(
self.adj_relation, 0, index), (self.batch_size, -1))
entities.append(neighbor_entities)
relations.append(neighbor_relations)
return entities, relations
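# --- Illustrative sketch (not part of the original source) ---
# How the receptive field grows in get_neighbors() above: each hop looks up the
# pre-sampled neighbors of every entity found so far, so the i-th entry has
# batch_size x neighbor_sample_size**i columns. Toy adjacency with 3 entities
# and 2 sampled neighbors each.
import torch
adj_entity = torch.tensor([[1, 2], [2, 0], [0, 1]])
batch_size, n_iter = 2, 2
items = torch.tensor([[0], [2]])        # [batch_size, 1]
entities = [items]
for i in range(n_iter):
    index = torch.flatten(entities[i])
    entities.append(torch.index_select(adj_entity, 0, index).reshape(batch_size, -1))
# shapes: [2, 1], [2, 2], [2, 4]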
def aggregate(self, user_embeddings, entities, relations):
r"""For each item, aggregate the entity representation and its neighborhood representation into a single vector.
Args:
user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size, embedding_size]
entities(list): entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
dimensions of entities: {[batch_size, 1],
[batch_size, n_neighbor],
[batch_size, n_neighbor^2],
...,
[batch_size, n_neighbor^n_iter]}
relations(list): relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for entities.
relations have the same shape as entities.
Returns:
item_embeddings(torch.FloatTensor): The embeddings of items, shape: [batch_size, embedding_size]
"""
entity_vectors = [self.entity_embedding(i) for i in entities]
relation_vectors = [self.relation_embedding(i) for i in relations]
for i in range(self.n_iter):
entity_vectors_next_iter = []
for hop in range(self.n_iter - i):
shape = (self.batch_size, -1,
self.neighbor_sample_size, self.embedding_size)
self_vectors = entity_vectors[hop]
neighbor_vectors = torch.reshape(
entity_vectors[hop + 1], shape)
neighbor_relations = torch.reshape(
relation_vectors[hop], shape)
# mix_neighbor_vectors
user_embeddings = torch.reshape(user_embeddings,
(self.batch_size, 1, 1, self.embedding_size)) # [batch_size, 1, 1, dim]
user_relation_scores = torch.mean(user_embeddings * neighbor_relations,
dim=-1) # [batch_size, -1, n_neighbor]
user_relation_scores_normalized = torch.unsqueeze(self.softmax(user_relation_scores),
dim=-1) # [batch_size, -1, n_neighbor, 1]
neighbors_agg = torch.mean(user_relation_scores_normalized * neighbor_vectors,
dim=2) # [batch_size, -1, dim]
if self.aggregator_class == 'sum':
output = torch.reshape(
self_vectors + neighbors_agg, (-1, self.embedding_size)) # [-1, dim]
elif self.aggregator_class == 'neighbor':
output = torch.reshape(
neighbors_agg, (-1, self.embedding_size)) # [-1, dim]
elif self.aggregator_class == 'concat':
# [batch_size, -1, dim * 2]
output = torch.cat([self_vectors, neighbors_agg], dim=-1)
output = torch.reshape(
output, (-1, self.embedding_size * 2)) # [-1, dim * 2]
else:
raise Exception("Unknown aggregator: " +
self.aggregator_class)
output = self.linear_layers[i](output)
# [batch_size, -1, dim]
output = torch.reshape(
output, [self.batch_size, -1, self.embedding_size])
if i == self.n_iter - 1:
vector = self.Tanh(output)
else:
vector = self.ReLU(output)
entity_vectors_next_iter.append(vector)
entity_vectors = entity_vectors_next_iter
res = torch.reshape(
entity_vectors[0], (self.batch_size, self.embedding_size))
return res
def label_smoothness_predict(self, user_embeddings, user, entities, relations):
r"""Predict the label of items by label smoothness.
Args:
user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size*2, embedding_size],
user(torch.FloatTensor): the index of users, shape: [batch_size*2]
entities(list): entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
dimensions of entities: {[batch_size*2, 1],
[batch_size*2, n_neighbor],
[batch_size*2, n_neighbor^2],
...,
[batch_size*2, n_neighbor^n_iter]}
relations(list): relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for entities.
relations have the same shape as entities.
Returns:
predicted_labels(torch.FloatTensor): The predicted label of items, shape: [batch_size*2]
"""
# calculate initial labels; calculate updating masks for label propagation
entity_labels = []
# True means the label of this item is reset to initial value during label propagation
reset_masks = []
holdout_item_for_user = None
for entities_per_iter in entities:
users = torch.unsqueeze(user, dim=1) # [batch_size, 1]
user_entity_concat = users * self.offset + \
entities_per_iter # [batch_size, n_neighbor^i]
# the first one in entities is the items to be held out
if holdout_item_for_user is None:
holdout_item_for_user = user_entity_concat
def lookup_interaction_table(x, _):
x = int(x)
label = self.interaction_table.setdefault(x, 0.5)
return label
initial_label = user_entity_concat.clone().cpu().double()
initial_label.map_(initial_label, lookup_interaction_table)
initial_label = initial_label.float().to(self.device)
# False if the item is held out
holdout_mask = (holdout_item_for_user - user_entity_concat).bool()
# True if the entity is a labeled item
reset_mask = (initial_label - 0.5).bool()
reset_mask = torch.logical_and(
reset_mask, holdout_mask) # remove held-out items
initial_label = holdout_mask.float() * initial_label + torch.logical_not(
holdout_mask).float() * 0.5 # label initialization
reset_masks.append(reset_mask)
entity_labels.append(initial_label)
# we do not need the reset_mask for the last iteration
reset_masks = reset_masks[:-1]
# label propagation
relation_vectors = [self.relation_embedding(i) for i in relations]
for i in range(self.n_iter):
entity_labels_next_iter = []
for hop in range(self.n_iter - i):
masks = reset_masks[hop]
self_labels = entity_labels[hop]
neighbor_labels = torch.reshape(entity_labels[hop + 1],
[self.batch_size, -1, self.neighbor_sample_size])
neighbor_relations = torch.reshape(relation_vectors[hop],
[self.batch_size, -1, self.neighbor_sample_size,
self.embedding_size])
# mix_neighbor_labels
user_embeddings = torch.reshape(user_embeddings,
[self.batch_size, 1, 1, self.embedding_size]) # [batch_size, 1, 1, dim]
user_relation_scores = torch.mean(user_embeddings * neighbor_relations,
dim=-1) # [batch_size, -1, n_neighbor]
user_relation_scores_normalized = self.softmax(
user_relation_scores) # [batch_size, -1, n_neighbor]
                neighbors_aggregated_label = torch.mean(
                    user_relation_scores_normalized * neighbor_labels, dim=2)  # [batch_size, -1]
output = masks.float() * self_labels + torch.logical_not(masks).float() * \
neighbors_aggregated_label
entity_labels_next_iter.append(output)
entity_labels = entity_labels_next_iter
predicted_labels = entity_labels[0].squeeze(-1)
return predicted_labels
def forward(self, user, item):
self.batch_size = item.shape[0]
# [batch_size, dim]
user_e = self.user_embedding(user)
# entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items. dimensions of entities:
# {[batch_size, 1], [batch_size, n_neighbor], [batch_size, n_neighbor^2], ..., [batch_size, n_neighbor^n_iter]}
entities, relations = self.get_neighbors(item)
# [batch_size, dim]
item_e = self.aggregate(user_e, entities, relations)
return user_e, item_e
def calculate_ls_loss(self, user, item, target):
r"""Calculate label smoothness loss.
Args:
user(torch.FloatTensor): the index of users, shape: [batch_size*2],
item(torch.FloatTensor): the index of items, shape: [batch_size*2],
target(torch.FloatTensor): the label of user-item, shape: [batch_size*2],
Returns:
ls_loss: label smoothness loss
"""
user_e = self.user_embedding(user)
entities, relations = self.get_neighbors(item)
predicted_labels = self.label_smoothness_predict(
user_e, user, entities, relations)
ls_loss = self.bce_loss(predicted_labels, target)
return ls_loss
def calculate_loss(self, interaction):
user = interaction[self.USER_ID]
pos_item = interaction[self.ITEM_ID]
neg_item = interaction[self.NEG_ITEM_ID]
target = torch.zeros(
len(user) * 2, dtype=torch.float32).to(self.device)
target[:len(user)] = 1
users = torch.cat((user, user))
items = torch.cat((pos_item, neg_item))
user_e, item_e = self.forward(users, items)
predict = torch.mul(user_e, item_e).sum(dim=1)
rec_loss = self.bce_loss(predict, target)
ls_loss = self.calculate_ls_loss(users, items, target)
l2_loss = self.l2_loss(user_e, item_e)
loss = rec_loss + self.ls_weight * ls_loss + self.reg_weight * l2_loss
return loss
def predict(self, interaction):
user = interaction[self.USER_ID]
item = interaction[self.ITEM_ID]
user_e, item_e = self.forward(user, item)
return torch.mul(user_e, item_e).sum(dim=1)
def full_sort_predict(self, interaction):
user_index = interaction[self.USER_ID]
item_index = torch.tensor(range(self.n_items)).to(self.device)
user = torch.unsqueeze(user_index, dim=1).repeat(
1, item_index.shape[0])
user = torch.flatten(user)
item = torch.unsqueeze(item_index, dim=0).repeat(
user_index.shape[0], 1)
item = torch.flatten(item)
user_e, item_e = self.forward(user, item)
score = torch.mul(user_e, item_e).sum(dim=1)
return score.view(-1)
|
construct_adj
|
Get neighbors and corresponding relations for each entity in the KG.
Args:
kg_graph(scipy.sparse.coo_matrix): an undirected graph
Returns:
tuple:
- adj_entity (torch.LongTensor): each line stores the sampled neighbor entities for a given entity,
shape: [n_entities, neighbor_sample_size]
- adj_relation (torch.LongTensor): each line stores the corresponding sampled neighbor relations,
shape: [n_entities, neighbor_sample_size]
|
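# --- Illustrative sketch (not part of the original source) ---
# How construct_adj() treats the KG as undirected before sampling: each
# (head, relation, tail) triple read from the sparse COO graph is recorded in
# both directions. The tiny graph below is made up.
import numpy as np
from scipy.sparse import coo_matrix
row = np.array([0, 1])     # head entities
col = np.array([2, 2])     # tail entities
data = np.array([5, 7])    # relation ids
kg_graph = coo_matrix((data, (row, col)), shape=(3, 3))
kg_dict = {}
for head, relation, tail in zip(kg_graph.row, kg_graph.data, kg_graph.col):
    kg_dict.setdefault(head, []).append((tail, relation))
    kg_dict.setdefault(tail, []).append((head, relation))
# e.g. kg_dict[2] == [(0, 5), (1, 7)]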
# -*- coding: utf-8 -*-
# @Time : 2020/10/3
# @Author : Changxin Tian
# @Email : [email protected]
r"""
KGNNLS
################################################
Reference:
Hongwei Wang et al. "Knowledge-aware Graph Neural Networks with Label Smoothness Regularization
for Recommender Systems." in KDD 2019.
Reference code:
https://github.com/hwwang55/KGNN-LS
"""
import torch
import torch.nn as nn
import numpy as np
import random
from recbole.utils import InputType
from recbole.model.abstract_recommender import KnowledgeRecommender
from recbole.model.loss import BPRLoss, EmbLoss
from recbole.model.init import xavier_normal_initialization
class KGNNLS(KnowledgeRecommender):
r"""KGNN-LS is a knowledge-based recommendation model.
    KGNN-LS transforms the knowledge graph into a user-specific weighted graph and then applies a graph neural network
    to compute personalized item embeddings. To provide a better inductive bias, KGNN-LS relies on the label smoothness
    assumption, which posits that adjacent items in the knowledge graph are likely to have similar user relevance
    labels/scores. Label smoothness provides regularization over the edge weights and is equivalent to a label
propagation scheme on a graph.
"""
input_type = InputType.PAIRWISE
def __init__(self, config, dataset):
super(KGNNLS, self).__init__(config, dataset)
# load parameters info
self.embedding_size = config['embedding_size']
self.neighbor_sample_size = config['neighbor_sample_size']
self.aggregator_class = config['aggregator'] # which aggregator to use
# number of iterations when computing entity representation
self.n_iter = config['n_iter']
self.reg_weight = config['reg_weight'] # weight of l2 regularization
# weight of label Smoothness regularization
self.ls_weight = config['ls_weight']
# define embedding
self.user_embedding = nn.Embedding(self.n_users, self.embedding_size)
self.entity_embedding = nn.Embedding(
self.n_entities, self.embedding_size)
self.relation_embedding = nn.Embedding(
self.n_relations + 1, self.embedding_size)
# sample neighbors and construct interaction table
kg_graph = dataset.kg_graph(form='coo', value_field='relation_id')
adj_entity, adj_relation = self.construct_adj(kg_graph)
self.adj_entity, self.adj_relation = adj_entity.to(
self.device), adj_relation.to(self.device)
inter_feat = dataset.dataset.inter_feat.values
pos_users = torch.from_numpy(inter_feat[:, 0])
pos_items = torch.from_numpy(inter_feat[:, 1])
pos_label = torch.ones(pos_items.shape)
pos_interaction_table, self.offset = self.get_interaction_table(
pos_users, pos_items, pos_label)
self.interaction_table = self.sample_neg_interaction(
pos_interaction_table, self.offset)
# define function
self.softmax = nn.Softmax(dim=-1)
self.linear_layers = torch.nn.ModuleList()
for i in range(self.n_iter):
self.linear_layers.append(nn.Linear(
self.embedding_size if not self.aggregator_class == 'concat' else self.embedding_size * 2,
self.embedding_size))
self.ReLU = nn.ReLU()
self.Tanh = nn.Tanh()
self.bce_loss = nn.BCEWithLogitsLoss()
self.l2_loss = EmbLoss()
# parameters initialization
self.apply(xavier_normal_initialization)
def get_interaction_table(self, user_id, item_id, y):
r"""Get interaction_table that is used for fetching user-item interaction label in LS regularization.
Args:
user_id(torch.Tensor): the user id in user-item interactions, shape: [n_interactions, 1]
item_id(torch.Tensor): the item id in user-item interactions, shape: [n_interactions, 1]
y(torch.Tensor): the label in user-item interactions, shape: [n_interactions, 1]
Returns:
tuple:
- interaction_table(dict): key: user_id * 10^offset + item_id; value: y_{user_id, item_id}
- offset(int): The offset that is used for calculating the key(index) in interaction_table
"""
offset = len(str(self.n_entities))
offset = 10 ** offset
keys = user_id * offset + item_id
keys = keys.int().cpu().numpy().tolist()
values = y.float().cpu().numpy().tolist()
interaction_table = dict(zip(keys, values))
return interaction_table, offset
def sample_neg_interaction(self, pos_interaction_table, offset):
r"""Sample neg_interaction to construct train data.
Args:
pos_interaction_table(dict): the interaction_table that only contains pos_interaction.
offset(int): The offset that is used for calculating the key(index) in interaction_table
Returns:
interaction_table(dict): key: user_id * 10^offset + item_id; value: y_{user_id, item_id}
"""
pos_num = len(pos_interaction_table)
neg_num = 0
neg_interaction_table = {}
while neg_num < pos_num:
user_id = random.randint(0, self.n_users)
item_id = random.randint(0, self.n_items)
keys = user_id * offset + item_id
if keys not in pos_interaction_table:
neg_interaction_table[keys] = 0.
neg_num += 1
interaction_table = {**pos_interaction_table, **neg_interaction_table}
return interaction_table
# MASKED: construct_adj function (lines 135-191)
def get_neighbors(self, items):
r"""Get neighbors and corresponding relations for each entity in items from adj_entity and adj_relation.
Args:
items(torch.LongTensor): The input tensor that contains item's id, shape: [batch_size, ]
Returns:
tuple:
- entities(list): Entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
dimensions of entities: {[batch_size, 1],
[batch_size, n_neighbor],
[batch_size, n_neighbor^2],
...,
[batch_size, n_neighbor^n_iter]}
- relations(list): Relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for
entities. Relations have the same shape as entities.
"""
items = torch.unsqueeze(items, dim=1)
entities = [items]
relations = []
for i in range(self.n_iter):
index = torch.flatten(entities[i])
neighbor_entities = torch.reshape(torch.index_select(
self.adj_entity, 0, index), (self.batch_size, -1))
neighbor_relations = torch.reshape(torch.index_select(
self.adj_relation, 0, index), (self.batch_size, -1))
entities.append(neighbor_entities)
relations.append(neighbor_relations)
return entities, relations
def aggregate(self, user_embeddings, entities, relations):
r"""For each item, aggregate the entity representation and its neighborhood representation into a single vector.
Args:
user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size, embedding_size]
entities(list): entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
dimensions of entities: {[batch_size, 1],
[batch_size, n_neighbor],
[batch_size, n_neighbor^2],
...,
[batch_size, n_neighbor^n_iter]}
relations(list): relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for entities.
relations have the same shape as entities.
Returns:
item_embeddings(torch.FloatTensor): The embeddings of items, shape: [batch_size, embedding_size]
"""
entity_vectors = [self.entity_embedding(i) for i in entities]
relation_vectors = [self.relation_embedding(i) for i in relations]
for i in range(self.n_iter):
entity_vectors_next_iter = []
for hop in range(self.n_iter - i):
shape = (self.batch_size, -1,
self.neighbor_sample_size, self.embedding_size)
self_vectors = entity_vectors[hop]
neighbor_vectors = torch.reshape(
entity_vectors[hop + 1], shape)
neighbor_relations = torch.reshape(
relation_vectors[hop], shape)
# mix_neighbor_vectors
user_embeddings = torch.reshape(user_embeddings,
(self.batch_size, 1, 1, self.embedding_size)) # [batch_size, 1, 1, dim]
user_relation_scores = torch.mean(user_embeddings * neighbor_relations,
dim=-1) # [batch_size, -1, n_neighbor]
user_relation_scores_normalized = torch.unsqueeze(self.softmax(user_relation_scores),
dim=-1) # [batch_size, -1, n_neighbor, 1]
neighbors_agg = torch.mean(user_relation_scores_normalized * neighbor_vectors,
dim=2) # [batch_size, -1, dim]
if self.aggregator_class == 'sum':
output = torch.reshape(
self_vectors + neighbors_agg, (-1, self.embedding_size)) # [-1, dim]
elif self.aggregator_class == 'neighbor':
output = torch.reshape(
neighbors_agg, (-1, self.embedding_size)) # [-1, dim]
elif self.aggregator_class == 'concat':
# [batch_size, -1, dim * 2]
output = torch.cat([self_vectors, neighbors_agg], dim=-1)
output = torch.reshape(
output, (-1, self.embedding_size * 2)) # [-1, dim * 2]
else:
raise Exception("Unknown aggregator: " +
self.aggregator_class)
output = self.linear_layers[i](output)
# [batch_size, -1, dim]
output = torch.reshape(
output, [self.batch_size, -1, self.embedding_size])
if i == self.n_iter - 1:
vector = self.Tanh(output)
else:
vector = self.ReLU(output)
entity_vectors_next_iter.append(vector)
entity_vectors = entity_vectors_next_iter
res = torch.reshape(
entity_vectors[0], (self.batch_size, self.embedding_size))
return res
def label_smoothness_predict(self, user_embeddings, user, entities, relations):
r"""Predict the label of items by label smoothness.
Args:
user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size*2, embedding_size],
user(torch.FloatTensor): the index of users, shape: [batch_size*2]
entities(list): entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
dimensions of entities: {[batch_size*2, 1],
[batch_size*2, n_neighbor],
[batch_size*2, n_neighbor^2],
...,
[batch_size*2, n_neighbor^n_iter]}
relations(list): relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for entities.
relations have the same shape as entities.
Returns:
predicted_labels(torch.FloatTensor): The predicted label of items, shape: [batch_size*2]
"""
# calculate initial labels; calculate updating masks for label propagation
entity_labels = []
# True means the label of this item is reset to initial value during label propagation
reset_masks = []
holdout_item_for_user = None
for entities_per_iter in entities:
users = torch.unsqueeze(user, dim=1) # [batch_size, 1]
user_entity_concat = users * self.offset + \
entities_per_iter # [batch_size, n_neighbor^i]
# the first one in entities is the items to be held out
if holdout_item_for_user is None:
holdout_item_for_user = user_entity_concat
def lookup_interaction_table(x, _):
x = int(x)
label = self.interaction_table.setdefault(x, 0.5)
return label
initial_label = user_entity_concat.clone().cpu().double()
initial_label.map_(initial_label, lookup_interaction_table)
initial_label = initial_label.float().to(self.device)
# False if the item is held out
holdout_mask = (holdout_item_for_user - user_entity_concat).bool()
# True if the entity is a labeled item
reset_mask = (initial_label - 0.5).bool()
reset_mask = torch.logical_and(
reset_mask, holdout_mask) # remove held-out items
initial_label = holdout_mask.float() * initial_label + torch.logical_not(
holdout_mask).float() * 0.5 # label initialization
reset_masks.append(reset_mask)
entity_labels.append(initial_label)
# we do not need the reset_mask for the last iteration
reset_masks = reset_masks[:-1]
# label propagation
relation_vectors = [self.relation_embedding(i) for i in relations]
for i in range(self.n_iter):
entity_labels_next_iter = []
for hop in range(self.n_iter - i):
masks = reset_masks[hop]
self_labels = entity_labels[hop]
neighbor_labels = torch.reshape(entity_labels[hop + 1],
[self.batch_size, -1, self.neighbor_sample_size])
neighbor_relations = torch.reshape(relation_vectors[hop],
[self.batch_size, -1, self.neighbor_sample_size,
self.embedding_size])
# mix_neighbor_labels
user_embeddings = torch.reshape(user_embeddings,
[self.batch_size, 1, 1, self.embedding_size]) # [batch_size, 1, 1, dim]
user_relation_scores = torch.mean(user_embeddings * neighbor_relations,
dim=-1) # [batch_size, -1, n_neighbor]
user_relation_scores_normalized = self.softmax(
user_relation_scores) # [batch_size, -1, n_neighbor]
                neighbors_aggregated_label = torch.mean(
                    user_relation_scores_normalized * neighbor_labels, dim=2)  # [batch_size, -1]
output = masks.float() * self_labels + torch.logical_not(masks).float() * \
neighbors_aggregated_label
entity_labels_next_iter.append(output)
entity_labels = entity_labels_next_iter
predicted_labels = entity_labels[0].squeeze(-1)
return predicted_labels
def forward(self, user, item):
self.batch_size = item.shape[0]
# [batch_size, dim]
user_e = self.user_embedding(user)
# entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items. dimensions of entities:
# {[batch_size, 1], [batch_size, n_neighbor], [batch_size, n_neighbor^2], ..., [batch_size, n_neighbor^n_iter]}
entities, relations = self.get_neighbors(item)
# [batch_size, dim]
item_e = self.aggregate(user_e, entities, relations)
return user_e, item_e
def calculate_ls_loss(self, user, item, target):
r"""Calculate label smoothness loss.
Args:
user(torch.FloatTensor): the index of users, shape: [batch_size*2],
item(torch.FloatTensor): the index of items, shape: [batch_size*2],
target(torch.FloatTensor): the label of user-item, shape: [batch_size*2],
Returns:
ls_loss: label smoothness loss
"""
user_e = self.user_embedding(user)
entities, relations = self.get_neighbors(item)
predicted_labels = self.label_smoothness_predict(
user_e, user, entities, relations)
ls_loss = self.bce_loss(predicted_labels, target)
return ls_loss
def calculate_loss(self, interaction):
user = interaction[self.USER_ID]
pos_item = interaction[self.ITEM_ID]
neg_item = interaction[self.NEG_ITEM_ID]
target = torch.zeros(
len(user) * 2, dtype=torch.float32).to(self.device)
target[:len(user)] = 1
users = torch.cat((user, user))
items = torch.cat((pos_item, neg_item))
user_e, item_e = self.forward(users, items)
predict = torch.mul(user_e, item_e).sum(dim=1)
rec_loss = self.bce_loss(predict, target)
ls_loss = self.calculate_ls_loss(users, items, target)
l2_loss = self.l2_loss(user_e, item_e)
loss = rec_loss + self.ls_weight * ls_loss + self.reg_weight * l2_loss
return loss
def predict(self, interaction):
user = interaction[self.USER_ID]
item = interaction[self.ITEM_ID]
user_e, item_e = self.forward(user, item)
return torch.mul(user_e, item_e).sum(dim=1)
def full_sort_predict(self, interaction):
user_index = interaction[self.USER_ID]
item_index = torch.tensor(range(self.n_items)).to(self.device)
user = torch.unsqueeze(user_index, dim=1).repeat(
1, item_index.shape[0])
user = torch.flatten(user)
item = torch.unsqueeze(item_index, dim=0).repeat(
user_index.shape[0], 1)
item = torch.flatten(item)
user_e, item_e = self.forward(user, item)
score = torch.mul(user_e, item_e).sum(dim=1)
return score.view(-1)
|
def construct_adj(self, kg_graph):
r"""Get neighbors and corresponding relations for each entity in the KG.
Args:
kg_graph(scipy.sparse.coo_matrix): an undirected graph
Returns:
tuple:
- adj_entity (torch.LongTensor): each line stores the sampled neighbor entities for a given entity,
shape: [n_entities, neighbor_sample_size]
- adj_relation (torch.LongTensor): each line stores the corresponding sampled neighbor relations,
shape: [n_entities, neighbor_sample_size]
"""
# self.logger.info('constructing knowledge graph ...')
# treat the KG as an undirected graph
kg_dict = dict()
for triple in zip(kg_graph.row, kg_graph.data, kg_graph.col):
head = triple[0]
relation = triple[1]
tail = triple[2]
if head not in kg_dict:
kg_dict[head] = []
kg_dict[head].append((tail, relation))
if tail not in kg_dict:
kg_dict[tail] = []
kg_dict[tail].append((head, relation))
# self.logger.info('constructing adjacency matrix ...')
# each line of adj_entity stores the sampled neighbor entities for a given entity
# each line of adj_relation stores the corresponding sampled neighbor relations
entity_num = kg_graph.shape[0]
adj_entity = np.zeros(
[entity_num, self.neighbor_sample_size], dtype=np.int64)
adj_relation = np.zeros(
[entity_num, self.neighbor_sample_size], dtype=np.int64)
for entity in range(entity_num):
if entity not in kg_dict.keys():
adj_entity[entity] = np.array(
[entity] * self.neighbor_sample_size)
adj_relation[entity] = np.array(
[0] * self.neighbor_sample_size)
continue
neighbors = kg_dict[entity]
n_neighbors = len(neighbors)
if n_neighbors >= self.neighbor_sample_size:
sampled_indices = np.random.choice(list(range(n_neighbors)), size=self.neighbor_sample_size,
replace=False)
else:
sampled_indices = np.random.choice(list(range(n_neighbors)), size=self.neighbor_sample_size,
replace=True)
adj_entity[entity] = np.array(
[neighbors[i][0] for i in sampled_indices])
adj_relation[entity] = np.array(
[neighbors[i][1] for i in sampled_indices])
return torch.from_numpy(adj_entity), torch.from_numpy(adj_relation)
| 135 | 191 |
# -*- coding: utf-8 -*-
# @Time : 2020/10/3
# @Author : Changxin Tian
# @Email : [email protected]
r"""
KGNNLS
################################################
Reference:
Hongwei Wang et al. "Knowledge-aware Graph Neural Networks with Label Smoothness Regularization
for Recommender Systems." in KDD 2019.
Reference code:
https://github.com/hwwang55/KGNN-LS
"""
import torch
import torch.nn as nn
import numpy as np
import random
from recbole.utils import InputType
from recbole.model.abstract_recommender import KnowledgeRecommender
from recbole.model.loss import BPRLoss, EmbLoss
from recbole.model.init import xavier_normal_initialization
class KGNNLS(KnowledgeRecommender):
r"""KGNN-LS is a knowledge-based recommendation model.
    KGNN-LS transforms the knowledge graph into a user-specific weighted graph and then applies a graph neural network
    to compute personalized item embeddings. To provide a better inductive bias, KGNN-LS relies on the label smoothness
    assumption, which posits that adjacent items in the knowledge graph are likely to have similar user relevance
    labels/scores. Label smoothness provides regularization over the edge weights and is equivalent to a label
propagation scheme on a graph.
"""
input_type = InputType.PAIRWISE
def __init__(self, config, dataset):
super(KGNNLS, self).__init__(config, dataset)
# load parameters info
self.embedding_size = config['embedding_size']
self.neighbor_sample_size = config['neighbor_sample_size']
self.aggregator_class = config['aggregator'] # which aggregator to use
# number of iterations when computing entity representation
self.n_iter = config['n_iter']
self.reg_weight = config['reg_weight'] # weight of l2 regularization
# weight of label Smoothness regularization
self.ls_weight = config['ls_weight']
# define embedding
self.user_embedding = nn.Embedding(self.n_users, self.embedding_size)
self.entity_embedding = nn.Embedding(
self.n_entities, self.embedding_size)
self.relation_embedding = nn.Embedding(
self.n_relations + 1, self.embedding_size)
# sample neighbors and construct interaction table
kg_graph = dataset.kg_graph(form='coo', value_field='relation_id')
adj_entity, adj_relation = self.construct_adj(kg_graph)
self.adj_entity, self.adj_relation = adj_entity.to(
self.device), adj_relation.to(self.device)
inter_feat = dataset.dataset.inter_feat.values
pos_users = torch.from_numpy(inter_feat[:, 0])
pos_items = torch.from_numpy(inter_feat[:, 1])
pos_label = torch.ones(pos_items.shape)
pos_interaction_table, self.offset = self.get_interaction_table(
pos_users, pos_items, pos_label)
self.interaction_table = self.sample_neg_interaction(
pos_interaction_table, self.offset)
# define function
self.softmax = nn.Softmax(dim=-1)
self.linear_layers = torch.nn.ModuleList()
for i in range(self.n_iter):
self.linear_layers.append(nn.Linear(
self.embedding_size if not self.aggregator_class == 'concat' else self.embedding_size * 2,
self.embedding_size))
self.ReLU = nn.ReLU()
self.Tanh = nn.Tanh()
self.bce_loss = nn.BCEWithLogitsLoss()
self.l2_loss = EmbLoss()
# parameters initialization
self.apply(xavier_normal_initialization)
def get_interaction_table(self, user_id, item_id, y):
r"""Get interaction_table that is used for fetching user-item interaction label in LS regularization.
Args:
user_id(torch.Tensor): the user id in user-item interactions, shape: [n_interactions, 1]
item_id(torch.Tensor): the item id in user-item interactions, shape: [n_interactions, 1]
y(torch.Tensor): the label in user-item interactions, shape: [n_interactions, 1]
Returns:
tuple:
- interaction_table(dict): key: user_id * 10^offset + item_id; value: y_{user_id, item_id}
- offset(int): The offset that is used for calculating the key(index) in interaction_table
"""
offset = len(str(self.n_entities))
offset = 10 ** offset
keys = user_id * offset + item_id
keys = keys.int().cpu().numpy().tolist()
values = y.float().cpu().numpy().tolist()
interaction_table = dict(zip(keys, values))
return interaction_table, offset
def sample_neg_interaction(self, pos_interaction_table, offset):
r"""Sample neg_interaction to construct train data.
Args:
pos_interaction_table(dict): the interaction_table that only contains pos_interaction.
offset(int): The offset that is used for calculating the key(index) in interaction_table
Returns:
interaction_table(dict): key: user_id * 10^offset + item_id; value: y_{user_id, item_id}
"""
pos_num = len(pos_interaction_table)
neg_num = 0
neg_interaction_table = {}
while neg_num < pos_num:
user_id = random.randint(0, self.n_users)
item_id = random.randint(0, self.n_items)
keys = user_id * offset + item_id
if keys not in pos_interaction_table:
neg_interaction_table[keys] = 0.
neg_num += 1
interaction_table = {**pos_interaction_table, **neg_interaction_table}
return interaction_table
def construct_adj(self, kg_graph):
r"""Get neighbors and corresponding relations for each entity in the KG.
Args:
kg_graph(scipy.sparse.coo_matrix): an undirected graph
Returns:
tuple:
- adj_entity (torch.LongTensor): each line stores the sampled neighbor entities for a given entity,
shape: [n_entities, neighbor_sample_size]
- adj_relation (torch.LongTensor): each line stores the corresponding sampled neighbor relations,
shape: [n_entities, neighbor_sample_size]
"""
# self.logger.info('constructing knowledge graph ...')
# treat the KG as an undirected graph
kg_dict = dict()
for triple in zip(kg_graph.row, kg_graph.data, kg_graph.col):
head = triple[0]
relation = triple[1]
tail = triple[2]
if head not in kg_dict:
kg_dict[head] = []
kg_dict[head].append((tail, relation))
if tail not in kg_dict:
kg_dict[tail] = []
kg_dict[tail].append((head, relation))
# self.logger.info('constructing adjacency matrix ...')
# each line of adj_entity stores the sampled neighbor entities for a given entity
# each line of adj_relation stores the corresponding sampled neighbor relations
entity_num = kg_graph.shape[0]
adj_entity = np.zeros(
[entity_num, self.neighbor_sample_size], dtype=np.int64)
adj_relation = np.zeros(
[entity_num, self.neighbor_sample_size], dtype=np.int64)
for entity in range(entity_num):
if entity not in kg_dict.keys():
adj_entity[entity] = np.array(
[entity] * self.neighbor_sample_size)
adj_relation[entity] = np.array(
[0] * self.neighbor_sample_size)
continue
neighbors = kg_dict[entity]
n_neighbors = len(neighbors)
if n_neighbors >= self.neighbor_sample_size:
sampled_indices = np.random.choice(list(range(n_neighbors)), size=self.neighbor_sample_size,
replace=False)
else:
sampled_indices = np.random.choice(list(range(n_neighbors)), size=self.neighbor_sample_size,
replace=True)
adj_entity[entity] = np.array(
[neighbors[i][0] for i in sampled_indices])
adj_relation[entity] = np.array(
[neighbors[i][1] for i in sampled_indices])
return torch.from_numpy(adj_entity), torch.from_numpy(adj_relation)
def get_neighbors(self, items):
r"""Get neighbors and corresponding relations for each entity in items from adj_entity and adj_relation.
Args:
items(torch.LongTensor): The input tensor that contains item's id, shape: [batch_size, ]
Returns:
tuple:
- entities(list): Entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
dimensions of entities: {[batch_size, 1],
[batch_size, n_neighbor],
[batch_size, n_neighbor^2],
...,
[batch_size, n_neighbor^n_iter]}
- relations(list): Relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for
entities. Relations have the same shape as entities.
"""
items = torch.unsqueeze(items, dim=1)
entities = [items]
relations = []
for i in range(self.n_iter):
index = torch.flatten(entities[i])
neighbor_entities = torch.reshape(torch.index_select(
self.adj_entity, 0, index), (self.batch_size, -1))
neighbor_relations = torch.reshape(torch.index_select(
self.adj_relation, 0, index), (self.batch_size, -1))
entities.append(neighbor_entities)
relations.append(neighbor_relations)
return entities, relations
def aggregate(self, user_embeddings, entities, relations):
r"""For each item, aggregate the entity representation and its neighborhood representation into a single vector.
Args:
user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size, embedding_size]
entities(list): entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
dimensions of entities: {[batch_size, 1],
[batch_size, n_neighbor],
[batch_size, n_neighbor^2],
...,
[batch_size, n_neighbor^n_iter]}
relations(list): relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for entities.
relations have the same shape as entities.
Returns:
item_embeddings(torch.FloatTensor): The embeddings of items, shape: [batch_size, embedding_size]
"""
entity_vectors = [self.entity_embedding(i) for i in entities]
relation_vectors = [self.relation_embedding(i) for i in relations]
for i in range(self.n_iter):
entity_vectors_next_iter = []
for hop in range(self.n_iter - i):
shape = (self.batch_size, -1,
self.neighbor_sample_size, self.embedding_size)
self_vectors = entity_vectors[hop]
neighbor_vectors = torch.reshape(
entity_vectors[hop + 1], shape)
neighbor_relations = torch.reshape(
relation_vectors[hop], shape)
# mix_neighbor_vectors
user_embeddings = torch.reshape(user_embeddings,
(self.batch_size, 1, 1, self.embedding_size)) # [batch_size, 1, 1, dim]
user_relation_scores = torch.mean(user_embeddings * neighbor_relations,
dim=-1) # [batch_size, -1, n_neighbor]
user_relation_scores_normalized = torch.unsqueeze(self.softmax(user_relation_scores),
dim=-1) # [batch_size, -1, n_neighbor, 1]
neighbors_agg = torch.mean(user_relation_scores_normalized * neighbor_vectors,
dim=2) # [batch_size, -1, dim]
if self.aggregator_class == 'sum':
output = torch.reshape(
self_vectors + neighbors_agg, (-1, self.embedding_size)) # [-1, dim]
elif self.aggregator_class == 'neighbor':
output = torch.reshape(
neighbors_agg, (-1, self.embedding_size)) # [-1, dim]
elif self.aggregator_class == 'concat':
# [batch_size, -1, dim * 2]
output = torch.cat([self_vectors, neighbors_agg], dim=-1)
output = torch.reshape(
output, (-1, self.embedding_size * 2)) # [-1, dim * 2]
else:
raise Exception("Unknown aggregator: " +
self.aggregator_class)
output = self.linear_layers[i](output)
# [batch_size, -1, dim]
output = torch.reshape(
output, [self.batch_size, -1, self.embedding_size])
if i == self.n_iter - 1:
vector = self.Tanh(output)
else:
vector = self.ReLU(output)
entity_vectors_next_iter.append(vector)
entity_vectors = entity_vectors_next_iter
res = torch.reshape(
entity_vectors[0], (self.batch_size, self.embedding_size))
return res
def label_smoothness_predict(self, user_embeddings, user, entities, relations):
r"""Predict the label of items by label smoothness.
Args:
user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size*2, embedding_size],
user(torch.FloatTensor): the index of users, shape: [batch_size*2]
entities(list): entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
dimensions of entities: {[batch_size*2, 1],
[batch_size*2, n_neighbor],
[batch_size*2, n_neighbor^2],
...,
[batch_size*2, n_neighbor^n_iter]}
relations(list): relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for entities.
relations have the same shape as entities.
Returns:
predicted_labels(torch.FloatTensor): The predicted label of items, shape: [batch_size*2]
"""
# calculate initial labels; calculate updating masks for label propagation
entity_labels = []
# True means the label of this item is reset to initial value during label propagation
reset_masks = []
holdout_item_for_user = None
for entities_per_iter in entities:
users = torch.unsqueeze(user, dim=1) # [batch_size, 1]
user_entity_concat = users * self.offset + \
entities_per_iter # [batch_size, n_neighbor^i]
# the first one in entities is the items to be held out
if holdout_item_for_user is None:
holdout_item_for_user = user_entity_concat
def lookup_interaction_table(x, _):
x = int(x)
label = self.interaction_table.setdefault(x, 0.5)
return label
initial_label = user_entity_concat.clone().cpu().double()
initial_label.map_(initial_label, lookup_interaction_table)
initial_label = initial_label.float().to(self.device)
# False if the item is held out
holdout_mask = (holdout_item_for_user - user_entity_concat).bool()
# True if the entity is a labeled item
reset_mask = (initial_label - 0.5).bool()
reset_mask = torch.logical_and(
reset_mask, holdout_mask) # remove held-out items
initial_label = holdout_mask.float() * initial_label + torch.logical_not(
holdout_mask).float() * 0.5 # label initialization
reset_masks.append(reset_mask)
entity_labels.append(initial_label)
# we do not need the reset_mask for the last iteration
reset_masks = reset_masks[:-1]
# label propagation
relation_vectors = [self.relation_embedding(i) for i in relations]
for i in range(self.n_iter):
entity_labels_next_iter = []
for hop in range(self.n_iter - i):
masks = reset_masks[hop]
self_labels = entity_labels[hop]
neighbor_labels = torch.reshape(entity_labels[hop + 1],
[self.batch_size, -1, self.neighbor_sample_size])
neighbor_relations = torch.reshape(relation_vectors[hop],
[self.batch_size, -1, self.neighbor_sample_size,
self.embedding_size])
# mix_neighbor_labels
user_embeddings = torch.reshape(user_embeddings,
[self.batch_size, 1, 1, self.embedding_size]) # [batch_size, 1, 1, dim]
user_relation_scores = torch.mean(user_embeddings * neighbor_relations,
dim=-1) # [batch_size, -1, n_neighbor]
user_relation_scores_normalized = self.softmax(
user_relation_scores) # [batch_size, -1, n_neighbor]
                neighbors_aggregated_label = torch.mean(
                    user_relation_scores_normalized * neighbor_labels, dim=2)  # [batch_size, -1]
output = masks.float() * self_labels + torch.logical_not(masks).float() * \
neighbors_aggregated_label
entity_labels_next_iter.append(output)
entity_labels = entity_labels_next_iter
predicted_labels = entity_labels[0].squeeze(-1)
return predicted_labels
def forward(self, user, item):
self.batch_size = item.shape[0]
# [batch_size, dim]
user_e = self.user_embedding(user)
# entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items. dimensions of entities:
# {[batch_size, 1], [batch_size, n_neighbor], [batch_size, n_neighbor^2], ..., [batch_size, n_neighbor^n_iter]}
entities, relations = self.get_neighbors(item)
# [batch_size, dim]
item_e = self.aggregate(user_e, entities, relations)
return user_e, item_e
def calculate_ls_loss(self, user, item, target):
r"""Calculate label smoothness loss.
Args:
user(torch.FloatTensor): the index of users, shape: [batch_size*2],
item(torch.FloatTensor): the index of items, shape: [batch_size*2],
target(torch.FloatTensor): the label of user-item, shape: [batch_size*2],
Returns:
ls_loss: label smoothness loss
"""
user_e = self.user_embedding(user)
entities, relations = self.get_neighbors(item)
predicted_labels = self.label_smoothness_predict(
user_e, user, entities, relations)
ls_loss = self.bce_loss(predicted_labels, target)
return ls_loss
def calculate_loss(self, interaction):
user = interaction[self.USER_ID]
pos_item = interaction[self.ITEM_ID]
neg_item = interaction[self.NEG_ITEM_ID]
target = torch.zeros(
len(user) * 2, dtype=torch.float32).to(self.device)
target[:len(user)] = 1
users = torch.cat((user, user))
items = torch.cat((pos_item, neg_item))
user_e, item_e = self.forward(users, items)
predict = torch.mul(user_e, item_e).sum(dim=1)
rec_loss = self.bce_loss(predict, target)
ls_loss = self.calculate_ls_loss(users, items, target)
l2_loss = self.l2_loss(user_e, item_e)
loss = rec_loss + self.ls_weight * ls_loss + self.reg_weight * l2_loss
return loss
def predict(self, interaction):
user = interaction[self.USER_ID]
item = interaction[self.ITEM_ID]
user_e, item_e = self.forward(user, item)
return torch.mul(user_e, item_e).sum(dim=1)
def full_sort_predict(self, interaction):
user_index = interaction[self.USER_ID]
item_index = torch.tensor(range(self.n_items)).to(self.device)
user = torch.unsqueeze(user_index, dim=1).repeat(
1, item_index.shape[0])
user = torch.flatten(user)
item = torch.unsqueeze(item_index, dim=0).repeat(
user_index.shape[0], 1)
item = torch.flatten(item)
user_e, item_e = self.forward(user, item)
score = torch.mul(user_e, item_e).sum(dim=1)
return score.view(-1)
|
calculate_ls_loss
|
Calculate label smoothness loss.
Args:
user(torch.FloatTensor): the index of users, shape: [batch_size*2],
item(torch.FloatTensor): the index of items, shape: [batch_size*2],
target(torch.FloatTensor): the label of user-item, shape: [batch_size*2],
Returns:
ls_loss: label smoothness loss
|
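# --- Illustrative sketch (not part of the original source) ---
# The reset-mask idea behind the label-smoothness loss: after averaging the
# neighbors' labels, entities whose labels are known (and not held out) are
# clamped back to their initial values, mirroring the
# masks.float() * self_labels + ... line in label_smoothness_predict().
# All numbers below are toy values.
import torch
self_labels = torch.tensor([1.0, 0.5, 0.0])    # current labels of 3 entities
neighbor_avg = torch.tensor([0.7, 0.2, 0.4])   # aggregated neighbor labels
reset_mask = torch.tensor([True, False, True]) # True = labeled item, keep its label
propagated = reset_mask.float() * self_labels + torch.logical_not(reset_mask).float() * neighbor_avg
# propagated: [1.0, 0.2, 0.0]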
# -*- coding: utf-8 -*-
# @Time : 2020/10/3
# @Author : Changxin Tian
# @Email : [email protected]
r"""
KGNNLS
################################################
Reference:
Hongwei Wang et al. "Knowledge-aware Graph Neural Networks with Label Smoothness Regularization
for Recommender Systems." in KDD 2019.
Reference code:
https://github.com/hwwang55/KGNN-LS
"""
import torch
import torch.nn as nn
import numpy as np
import random
from recbole.utils import InputType
from recbole.model.abstract_recommender import KnowledgeRecommender
from recbole.model.loss import BPRLoss, EmbLoss
from recbole.model.init import xavier_normal_initialization
class KGNNLS(KnowledgeRecommender):
r"""KGNN-LS is a knowledge-based recommendation model.
    KGNN-LS transforms the knowledge graph into a user-specific weighted graph and then applies a graph neural network
    to compute personalized item embeddings. To provide a better inductive bias, KGNN-LS relies on the label smoothness
    assumption, which posits that adjacent items in the knowledge graph are likely to have similar user relevance
    labels/scores. Label smoothness provides regularization over the edge weights and is equivalent to a label
propagation scheme on a graph.
"""
input_type = InputType.PAIRWISE
def __init__(self, config, dataset):
super(KGNNLS, self).__init__(config, dataset)
# load parameters info
self.embedding_size = config['embedding_size']
self.neighbor_sample_size = config['neighbor_sample_size']
self.aggregator_class = config['aggregator'] # which aggregator to use
# number of iterations when computing entity representation
self.n_iter = config['n_iter']
self.reg_weight = config['reg_weight'] # weight of l2 regularization
# weight of label Smoothness regularization
self.ls_weight = config['ls_weight']
# define embedding
self.user_embedding = nn.Embedding(self.n_users, self.embedding_size)
self.entity_embedding = nn.Embedding(
self.n_entities, self.embedding_size)
self.relation_embedding = nn.Embedding(
self.n_relations + 1, self.embedding_size)
# sample neighbors and construct interaction table
kg_graph = dataset.kg_graph(form='coo', value_field='relation_id')
adj_entity, adj_relation = self.construct_adj(kg_graph)
self.adj_entity, self.adj_relation = adj_entity.to(
self.device), adj_relation.to(self.device)
inter_feat = dataset.dataset.inter_feat.values
pos_users = torch.from_numpy(inter_feat[:, 0])
pos_items = torch.from_numpy(inter_feat[:, 1])
pos_label = torch.ones(pos_items.shape)
pos_interaction_table, self.offset = self.get_interaction_table(
pos_users, pos_items, pos_label)
self.interaction_table = self.sample_neg_interaction(
pos_interaction_table, self.offset)
# define function
self.softmax = nn.Softmax(dim=-1)
self.linear_layers = torch.nn.ModuleList()
for i in range(self.n_iter):
self.linear_layers.append(nn.Linear(
self.embedding_size if not self.aggregator_class == 'concat' else self.embedding_size * 2,
self.embedding_size))
self.ReLU = nn.ReLU()
self.Tanh = nn.Tanh()
self.bce_loss = nn.BCEWithLogitsLoss()
self.l2_loss = EmbLoss()
# parameters initialization
self.apply(xavier_normal_initialization)
def get_interaction_table(self, user_id, item_id, y):
r"""Get interaction_table that is used for fetching user-item interaction label in LS regularization.
Args:
user_id(torch.Tensor): the user id in user-item interactions, shape: [n_interactions, 1]
item_id(torch.Tensor): the item id in user-item interactions, shape: [n_interactions, 1]
y(torch.Tensor): the label in user-item interactions, shape: [n_interactions, 1]
Returns:
tuple:
- interaction_table(dict): key: user_id * 10^offset + item_id; value: y_{user_id, item_id}
- offset(int): The offset that is used for calculating the key(index) in interaction_table
"""
offset = len(str(self.n_entities))
offset = 10 ** offset
keys = user_id * offset + item_id
keys = keys.int().cpu().numpy().tolist()
values = y.float().cpu().numpy().tolist()
interaction_table = dict(zip(keys, values))
return interaction_table, offset
def sample_neg_interaction(self, pos_interaction_table, offset):
r"""Sample neg_interaction to construct train data.
Args:
pos_interaction_table(dict): the interaction_table that only contains pos_interaction.
offset(int): The offset that is used for calculating the key(index) in interaction_table
Returns:
interaction_table(dict): key: user_id * 10^offset + item_id; value: y_{user_id, item_id}
"""
pos_num = len(pos_interaction_table)
neg_num = 0
neg_interaction_table = {}
while neg_num < pos_num:
user_id = random.randint(0, self.n_users)
item_id = random.randint(0, self.n_items)
keys = user_id * offset + item_id
if keys not in pos_interaction_table:
neg_interaction_table[keys] = 0.
neg_num += 1
interaction_table = {**pos_interaction_table, **neg_interaction_table}
return interaction_table
def construct_adj(self, kg_graph):
r"""Get neighbors and corresponding relations for each entity in the KG.
Args:
kg_graph(scipy.sparse.coo_matrix): an undirected graph
Returns:
tuple:
- adj_entity (torch.LongTensor): each line stores the sampled neighbor entities for a given entity,
shape: [n_entities, neighbor_sample_size]
- adj_relation (torch.LongTensor): each line stores the corresponding sampled neighbor relations,
shape: [n_entities, neighbor_sample_size]
"""
# self.logger.info('constructing knowledge graph ...')
# treat the KG as an undirected graph
kg_dict = dict()
for triple in zip(kg_graph.row, kg_graph.data, kg_graph.col):
head = triple[0]
relation = triple[1]
tail = triple[2]
if head not in kg_dict:
kg_dict[head] = []
kg_dict[head].append((tail, relation))
if tail not in kg_dict:
kg_dict[tail] = []
kg_dict[tail].append((head, relation))
# self.logger.info('constructing adjacency matrix ...')
# each line of adj_entity stores the sampled neighbor entities for a given entity
# each line of adj_relation stores the corresponding sampled neighbor relations
entity_num = kg_graph.shape[0]
adj_entity = np.zeros(
[entity_num, self.neighbor_sample_size], dtype=np.int64)
adj_relation = np.zeros(
[entity_num, self.neighbor_sample_size], dtype=np.int64)
for entity in range(entity_num):
if entity not in kg_dict.keys():
adj_entity[entity] = np.array(
[entity] * self.neighbor_sample_size)
adj_relation[entity] = np.array(
[0] * self.neighbor_sample_size)
continue
neighbors = kg_dict[entity]
n_neighbors = len(neighbors)
if n_neighbors >= self.neighbor_sample_size:
sampled_indices = np.random.choice(list(range(n_neighbors)), size=self.neighbor_sample_size,
replace=False)
else:
sampled_indices = np.random.choice(list(range(n_neighbors)), size=self.neighbor_sample_size,
replace=True)
adj_entity[entity] = np.array(
[neighbors[i][0] for i in sampled_indices])
adj_relation[entity] = np.array(
[neighbors[i][1] for i in sampled_indices])
return torch.from_numpy(adj_entity), torch.from_numpy(adj_relation)
def get_neighbors(self, items):
r"""Get neighbors and corresponding relations for each entity in items from adj_entity and adj_relation.
Args:
items(torch.LongTensor): The input tensor that contains item's id, shape: [batch_size, ]
Returns:
tuple:
- entities(list): Entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
dimensions of entities: {[batch_size, 1],
[batch_size, n_neighbor],
[batch_size, n_neighbor^2],
...,
[batch_size, n_neighbor^n_iter]}
- relations(list): Relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for
entities. Relations have the same shape as entities.
"""
items = torch.unsqueeze(items, dim=1)
entities = [items]
relations = []
for i in range(self.n_iter):
index = torch.flatten(entities[i])
neighbor_entities = torch.reshape(torch.index_select(
self.adj_entity, 0, index), (self.batch_size, -1))
neighbor_relations = torch.reshape(torch.index_select(
self.adj_relation, 0, index), (self.batch_size, -1))
entities.append(neighbor_entities)
relations.append(neighbor_relations)
return entities, relations
def aggregate(self, user_embeddings, entities, relations):
r"""For each item, aggregate the entity representation and its neighborhood representation into a single vector.
Args:
user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size, embedding_size]
entities(list): entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
dimensions of entities: {[batch_size, 1],
[batch_size, n_neighbor],
[batch_size, n_neighbor^2],
...,
[batch_size, n_neighbor^n_iter]}
relations(list): relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for entities.
relations have the same shape as entities.
Returns:
item_embeddings(torch.FloatTensor): The embeddings of items, shape: [batch_size, embedding_size]
"""
entity_vectors = [self.entity_embedding(i) for i in entities]
relation_vectors = [self.relation_embedding(i) for i in relations]
for i in range(self.n_iter):
entity_vectors_next_iter = []
for hop in range(self.n_iter - i):
shape = (self.batch_size, -1,
self.neighbor_sample_size, self.embedding_size)
self_vectors = entity_vectors[hop]
neighbor_vectors = torch.reshape(
entity_vectors[hop + 1], shape)
neighbor_relations = torch.reshape(
relation_vectors[hop], shape)
# mix_neighbor_vectors
user_embeddings = torch.reshape(user_embeddings,
(self.batch_size, 1, 1, self.embedding_size)) # [batch_size, 1, 1, dim]
user_relation_scores = torch.mean(user_embeddings * neighbor_relations,
dim=-1) # [batch_size, -1, n_neighbor]
user_relation_scores_normalized = torch.unsqueeze(self.softmax(user_relation_scores),
dim=-1) # [batch_size, -1, n_neighbor, 1]
neighbors_agg = torch.mean(user_relation_scores_normalized * neighbor_vectors,
dim=2) # [batch_size, -1, dim]
if self.aggregator_class == 'sum':
output = torch.reshape(
self_vectors + neighbors_agg, (-1, self.embedding_size)) # [-1, dim]
elif self.aggregator_class == 'neighbor':
output = torch.reshape(
neighbors_agg, (-1, self.embedding_size)) # [-1, dim]
elif self.aggregator_class == 'concat':
# [batch_size, -1, dim * 2]
output = torch.cat([self_vectors, neighbors_agg], dim=-1)
output = torch.reshape(
output, (-1, self.embedding_size * 2)) # [-1, dim * 2]
else:
raise Exception("Unknown aggregator: " +
self.aggregator_class)
output = self.linear_layers[i](output)
# [batch_size, -1, dim]
output = torch.reshape(
output, [self.batch_size, -1, self.embedding_size])
if i == self.n_iter - 1:
vector = self.Tanh(output)
else:
vector = self.ReLU(output)
entity_vectors_next_iter.append(vector)
entity_vectors = entity_vectors_next_iter
res = torch.reshape(
entity_vectors[0], (self.batch_size, self.embedding_size))
return res
def label_smoothness_predict(self, user_embeddings, user, entities, relations):
r"""Predict the label of items by label smoothness.
Args:
user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size*2, embedding_size],
user(torch.FloatTensor): the index of users, shape: [batch_size*2]
entities(list): entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
dimensions of entities: {[batch_size*2, 1],
[batch_size*2, n_neighbor],
[batch_size*2, n_neighbor^2],
...,
[batch_size*2, n_neighbor^n_iter]}
relations(list): relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for entities.
relations have the same shape as entities.
Returns:
predicted_labels(torch.FloatTensor): The predicted label of items, shape: [batch_size*2]
"""
# calculate initial labels; calculate updating masks for label propagation
entity_labels = []
# True means the label of this item is reset to initial value during label propagation
reset_masks = []
holdout_item_for_user = None
for entities_per_iter in entities:
users = torch.unsqueeze(user, dim=1) # [batch_size, 1]
user_entity_concat = users * self.offset + \
entities_per_iter # [batch_size, n_neighbor^i]
# the first one in entities is the items to be held out
if holdout_item_for_user is None:
holdout_item_for_user = user_entity_concat
def lookup_interaction_table(x, _):
x = int(x)
label = self.interaction_table.setdefault(x, 0.5)
return label
initial_label = user_entity_concat.clone().cpu().double()
initial_label.map_(initial_label, lookup_interaction_table)
initial_label = initial_label.float().to(self.device)
# False if the item is held out
holdout_mask = (holdout_item_for_user - user_entity_concat).bool()
# True if the entity is a labeled item
reset_mask = (initial_label - 0.5).bool()
reset_mask = torch.logical_and(
reset_mask, holdout_mask) # remove held-out items
initial_label = holdout_mask.float() * initial_label + torch.logical_not(
holdout_mask).float() * 0.5 # label initialization
reset_masks.append(reset_mask)
entity_labels.append(initial_label)
# we do not need the reset_mask for the last iteration
reset_masks = reset_masks[:-1]
# label propagation
relation_vectors = [self.relation_embedding(i) for i in relations]
for i in range(self.n_iter):
entity_labels_next_iter = []
for hop in range(self.n_iter - i):
masks = reset_masks[hop]
self_labels = entity_labels[hop]
neighbor_labels = torch.reshape(entity_labels[hop + 1],
[self.batch_size, -1, self.neighbor_sample_size])
neighbor_relations = torch.reshape(relation_vectors[hop],
[self.batch_size, -1, self.neighbor_sample_size,
self.embedding_size])
# mix_neighbor_labels
user_embeddings = torch.reshape(user_embeddings,
[self.batch_size, 1, 1, self.embedding_size]) # [batch_size, 1, 1, dim]
user_relation_scores = torch.mean(user_embeddings * neighbor_relations,
dim=-1) # [batch_size, -1, n_neighbor]
user_relation_scores_normalized = self.softmax(
user_relation_scores) # [batch_size, -1, n_neighbor]
neighbors_aggregated_label = torch.mean(user_relation_scores_normalized * neighbor_labels,
dim=2) # [batch_size, -1, dim] # [batch_size, -1]
output = masks.float() * self_labels + torch.logical_not(masks).float() * \
neighbors_aggregated_label
entity_labels_next_iter.append(output)
entity_labels = entity_labels_next_iter
predicted_labels = entity_labels[0].squeeze(-1)
return predicted_labels
def forward(self, user, item):
self.batch_size = item.shape[0]
# [batch_size, dim]
user_e = self.user_embedding(user)
# entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items. dimensions of entities:
# {[batch_size, 1], [batch_size, n_neighbor], [batch_size, n_neighbor^2], ..., [batch_size, n_neighbor^n_iter]}
entities, relations = self.get_neighbors(item)
# [batch_size, dim]
item_e = self.aggregate(user_e, entities, relations)
return user_e, item_e
# MASKED: calculate_ls_loss function (lines 397-414)
def calculate_loss(self, interaction):
user = interaction[self.USER_ID]
pos_item = interaction[self.ITEM_ID]
neg_item = interaction[self.NEG_ITEM_ID]
target = torch.zeros(
len(user) * 2, dtype=torch.float32).to(self.device)
target[:len(user)] = 1
users = torch.cat((user, user))
items = torch.cat((pos_item, neg_item))
user_e, item_e = self.forward(users, items)
predict = torch.mul(user_e, item_e).sum(dim=1)
rec_loss = self.bce_loss(predict, target)
ls_loss = self.calculate_ls_loss(users, items, target)
l2_loss = self.l2_loss(user_e, item_e)
loss = rec_loss + self.ls_weight * ls_loss + self.reg_weight * l2_loss
return loss
def predict(self, interaction):
user = interaction[self.USER_ID]
item = interaction[self.ITEM_ID]
user_e, item_e = self.forward(user, item)
return torch.mul(user_e, item_e).sum(dim=1)
def full_sort_predict(self, interaction):
user_index = interaction[self.USER_ID]
item_index = torch.tensor(range(self.n_items)).to(self.device)
user = torch.unsqueeze(user_index, dim=1).repeat(
1, item_index.shape[0])
user = torch.flatten(user)
item = torch.unsqueeze(item_index, dim=0).repeat(
user_index.shape[0], 1)
item = torch.flatten(item)
user_e, item_e = self.forward(user, item)
score = torch.mul(user_e, item_e).sum(dim=1)
return score.view(-1)
|
def calculate_ls_loss(self, user, item, target):
r"""Calculate label smoothness loss.
Args:
user(torch.FloatTensor): the index of users, shape: [batch_size*2],
item(torch.FloatTensor): the index of items, shape: [batch_size*2],
target(torch.FloatTensor): the label of user-item, shape: [batch_size*2],
Returns:
ls_loss: label smoothness loss
"""
user_e = self.user_embedding(user)
entities, relations = self.get_neighbors(item)
predicted_labels = self.label_smoothness_predict(
user_e, user, entities, relations)
ls_loss = self.bce_loss(predicted_labels, target)
return ls_loss
| 397 | 414 |
# -*- coding: utf-8 -*-
# @Time : 2020/10/3
# @Author : Changxin Tian
# @Email : [email protected]
r"""
KGNNLS
################################################
Reference:
Hongwei Wang et al. "Knowledge-aware Graph Neural Networks with Label Smoothness Regularization
for Recommender Systems." in KDD 2019.
Reference code:
https://github.com/hwwang55/KGNN-LS
"""
import torch
import torch.nn as nn
import numpy as np
import random
from recbole.utils import InputType
from recbole.model.abstract_recommender import KnowledgeRecommender
from recbole.model.loss import BPRLoss, EmbLoss
from recbole.model.init import xavier_normal_initialization
class KGNNLS(KnowledgeRecommender):
r"""KGNN-LS is a knowledge-based recommendation model.
    KGNN-LS transforms the knowledge graph into a user-specific weighted graph and then applies a graph neural
    network to compute personalized item embeddings. To provide a better inductive bias, KGNN-LS relies on a label
    smoothness assumption, which posits that adjacent items in the knowledge graph are likely to have similar user
    relevance labels/scores. Label smoothness regularizes the edge weights and is equivalent to a label propagation
    scheme on a graph.
"""
input_type = InputType.PAIRWISE
def __init__(self, config, dataset):
super(KGNNLS, self).__init__(config, dataset)
# load parameters info
self.embedding_size = config['embedding_size']
self.neighbor_sample_size = config['neighbor_sample_size']
self.aggregator_class = config['aggregator'] # which aggregator to use
# number of iterations when computing entity representation
self.n_iter = config['n_iter']
self.reg_weight = config['reg_weight'] # weight of l2 regularization
# weight of label Smoothness regularization
self.ls_weight = config['ls_weight']
# define embedding
self.user_embedding = nn.Embedding(self.n_users, self.embedding_size)
self.entity_embedding = nn.Embedding(
self.n_entities, self.embedding_size)
self.relation_embedding = nn.Embedding(
self.n_relations + 1, self.embedding_size)
# sample neighbors and construct interaction table
kg_graph = dataset.kg_graph(form='coo', value_field='relation_id')
adj_entity, adj_relation = self.construct_adj(kg_graph)
self.adj_entity, self.adj_relation = adj_entity.to(
self.device), adj_relation.to(self.device)
inter_feat = dataset.dataset.inter_feat.values
pos_users = torch.from_numpy(inter_feat[:, 0])
pos_items = torch.from_numpy(inter_feat[:, 1])
pos_label = torch.ones(pos_items.shape)
pos_interaction_table, self.offset = self.get_interaction_table(
pos_users, pos_items, pos_label)
self.interaction_table = self.sample_neg_interaction(
pos_interaction_table, self.offset)
# define function
self.softmax = nn.Softmax(dim=-1)
self.linear_layers = torch.nn.ModuleList()
for i in range(self.n_iter):
self.linear_layers.append(nn.Linear(
self.embedding_size if not self.aggregator_class == 'concat' else self.embedding_size * 2,
self.embedding_size))
self.ReLU = nn.ReLU()
self.Tanh = nn.Tanh()
self.bce_loss = nn.BCEWithLogitsLoss()
self.l2_loss = EmbLoss()
# parameters initialization
self.apply(xavier_normal_initialization)
def get_interaction_table(self, user_id, item_id, y):
r"""Get interaction_table that is used for fetching user-item interaction label in LS regularization.
Args:
user_id(torch.Tensor): the user id in user-item interactions, shape: [n_interactions, 1]
item_id(torch.Tensor): the item id in user-item interactions, shape: [n_interactions, 1]
y(torch.Tensor): the label in user-item interactions, shape: [n_interactions, 1]
Returns:
tuple:
- interaction_table(dict): key: user_id * 10^offset + item_id; value: y_{user_id, item_id}
- offset(int): The offset that is used for calculating the key(index) in interaction_table
"""
offset = len(str(self.n_entities))
offset = 10 ** offset
keys = user_id * offset + item_id
keys = keys.int().cpu().numpy().tolist()
values = y.float().cpu().numpy().tolist()
interaction_table = dict(zip(keys, values))
return interaction_table, offset
def sample_neg_interaction(self, pos_interaction_table, offset):
r"""Sample neg_interaction to construct train data.
Args:
pos_interaction_table(dict): the interaction_table that only contains pos_interaction.
offset(int): The offset that is used for calculating the key(index) in interaction_table
Returns:
interaction_table(dict): key: user_id * 10^offset + item_id; value: y_{user_id, item_id}
"""
pos_num = len(pos_interaction_table)
neg_num = 0
neg_interaction_table = {}
while neg_num < pos_num:
user_id = random.randint(0, self.n_users)
item_id = random.randint(0, self.n_items)
keys = user_id * offset + item_id
if keys not in pos_interaction_table:
neg_interaction_table[keys] = 0.
neg_num += 1
interaction_table = {**pos_interaction_table, **neg_interaction_table}
return interaction_table
def construct_adj(self, kg_graph):
r"""Get neighbors and corresponding relations for each entity in the KG.
Args:
kg_graph(scipy.sparse.coo_matrix): an undirected graph
Returns:
tuple:
- adj_entity (torch.LongTensor): each line stores the sampled neighbor entities for a given entity,
shape: [n_entities, neighbor_sample_size]
- adj_relation (torch.LongTensor): each line stores the corresponding sampled neighbor relations,
shape: [n_entities, neighbor_sample_size]
"""
# self.logger.info('constructing knowledge graph ...')
# treat the KG as an undirected graph
kg_dict = dict()
for triple in zip(kg_graph.row, kg_graph.data, kg_graph.col):
head = triple[0]
relation = triple[1]
tail = triple[2]
if head not in kg_dict:
kg_dict[head] = []
kg_dict[head].append((tail, relation))
if tail not in kg_dict:
kg_dict[tail] = []
kg_dict[tail].append((head, relation))
# self.logger.info('constructing adjacency matrix ...')
# each line of adj_entity stores the sampled neighbor entities for a given entity
# each line of adj_relation stores the corresponding sampled neighbor relations
entity_num = kg_graph.shape[0]
adj_entity = np.zeros(
[entity_num, self.neighbor_sample_size], dtype=np.int64)
adj_relation = np.zeros(
[entity_num, self.neighbor_sample_size], dtype=np.int64)
for entity in range(entity_num):
if entity not in kg_dict.keys():
adj_entity[entity] = np.array(
[entity] * self.neighbor_sample_size)
adj_relation[entity] = np.array(
[0] * self.neighbor_sample_size)
continue
neighbors = kg_dict[entity]
n_neighbors = len(neighbors)
if n_neighbors >= self.neighbor_sample_size:
sampled_indices = np.random.choice(list(range(n_neighbors)), size=self.neighbor_sample_size,
replace=False)
else:
sampled_indices = np.random.choice(list(range(n_neighbors)), size=self.neighbor_sample_size,
replace=True)
adj_entity[entity] = np.array(
[neighbors[i][0] for i in sampled_indices])
adj_relation[entity] = np.array(
[neighbors[i][1] for i in sampled_indices])
return torch.from_numpy(adj_entity), torch.from_numpy(adj_relation)
def get_neighbors(self, items):
r"""Get neighbors and corresponding relations for each entity in items from adj_entity and adj_relation.
Args:
items(torch.LongTensor): The input tensor that contains item's id, shape: [batch_size, ]
Returns:
tuple:
- entities(list): Entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
dimensions of entities: {[batch_size, 1],
[batch_size, n_neighbor],
[batch_size, n_neighbor^2],
...,
[batch_size, n_neighbor^n_iter]}
- relations(list): Relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for
entities. Relations have the same shape as entities.
"""
items = torch.unsqueeze(items, dim=1)
entities = [items]
relations = []
for i in range(self.n_iter):
index = torch.flatten(entities[i])
neighbor_entities = torch.reshape(torch.index_select(
self.adj_entity, 0, index), (self.batch_size, -1))
neighbor_relations = torch.reshape(torch.index_select(
self.adj_relation, 0, index), (self.batch_size, -1))
entities.append(neighbor_entities)
relations.append(neighbor_relations)
return entities, relations
def aggregate(self, user_embeddings, entities, relations):
r"""For each item, aggregate the entity representation and its neighborhood representation into a single vector.
Args:
user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size, embedding_size]
entities(list): entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
dimensions of entities: {[batch_size, 1],
[batch_size, n_neighbor],
[batch_size, n_neighbor^2],
...,
[batch_size, n_neighbor^n_iter]}
relations(list): relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for entities.
relations have the same shape as entities.
Returns:
item_embeddings(torch.FloatTensor): The embeddings of items, shape: [batch_size, embedding_size]
"""
entity_vectors = [self.entity_embedding(i) for i in entities]
relation_vectors = [self.relation_embedding(i) for i in relations]
for i in range(self.n_iter):
entity_vectors_next_iter = []
for hop in range(self.n_iter - i):
shape = (self.batch_size, -1,
self.neighbor_sample_size, self.embedding_size)
self_vectors = entity_vectors[hop]
neighbor_vectors = torch.reshape(
entity_vectors[hop + 1], shape)
neighbor_relations = torch.reshape(
relation_vectors[hop], shape)
# mix_neighbor_vectors
user_embeddings = torch.reshape(user_embeddings,
(self.batch_size, 1, 1, self.embedding_size)) # [batch_size, 1, 1, dim]
user_relation_scores = torch.mean(user_embeddings * neighbor_relations,
dim=-1) # [batch_size, -1, n_neighbor]
user_relation_scores_normalized = torch.unsqueeze(self.softmax(user_relation_scores),
dim=-1) # [batch_size, -1, n_neighbor, 1]
neighbors_agg = torch.mean(user_relation_scores_normalized * neighbor_vectors,
dim=2) # [batch_size, -1, dim]
if self.aggregator_class == 'sum':
output = torch.reshape(
self_vectors + neighbors_agg, (-1, self.embedding_size)) # [-1, dim]
elif self.aggregator_class == 'neighbor':
output = torch.reshape(
neighbors_agg, (-1, self.embedding_size)) # [-1, dim]
elif self.aggregator_class == 'concat':
# [batch_size, -1, dim * 2]
output = torch.cat([self_vectors, neighbors_agg], dim=-1)
output = torch.reshape(
output, (-1, self.embedding_size * 2)) # [-1, dim * 2]
else:
raise Exception("Unknown aggregator: " +
self.aggregator_class)
output = self.linear_layers[i](output)
# [batch_size, -1, dim]
output = torch.reshape(
output, [self.batch_size, -1, self.embedding_size])
if i == self.n_iter - 1:
vector = self.Tanh(output)
else:
vector = self.ReLU(output)
entity_vectors_next_iter.append(vector)
entity_vectors = entity_vectors_next_iter
res = torch.reshape(
entity_vectors[0], (self.batch_size, self.embedding_size))
return res
def label_smoothness_predict(self, user_embeddings, user, entities, relations):
r"""Predict the label of items by label smoothness.
Args:
user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size*2, embedding_size],
user(torch.FloatTensor): the index of users, shape: [batch_size*2]
entities(list): entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
dimensions of entities: {[batch_size*2, 1],
[batch_size*2, n_neighbor],
[batch_size*2, n_neighbor^2],
...,
[batch_size*2, n_neighbor^n_iter]}
relations(list): relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for entities.
relations have the same shape as entities.
Returns:
predicted_labels(torch.FloatTensor): The predicted label of items, shape: [batch_size*2]
"""
# calculate initial labels; calculate updating masks for label propagation
entity_labels = []
# True means the label of this item is reset to initial value during label propagation
reset_masks = []
holdout_item_for_user = None
for entities_per_iter in entities:
users = torch.unsqueeze(user, dim=1) # [batch_size, 1]
user_entity_concat = users * self.offset + \
entities_per_iter # [batch_size, n_neighbor^i]
# the first one in entities is the items to be held out
if holdout_item_for_user is None:
holdout_item_for_user = user_entity_concat
def lookup_interaction_table(x, _):
x = int(x)
label = self.interaction_table.setdefault(x, 0.5)
return label
initial_label = user_entity_concat.clone().cpu().double()
initial_label.map_(initial_label, lookup_interaction_table)
initial_label = initial_label.float().to(self.device)
# False if the item is held out
holdout_mask = (holdout_item_for_user - user_entity_concat).bool()
# True if the entity is a labeled item
reset_mask = (initial_label - 0.5).bool()
reset_mask = torch.logical_and(
reset_mask, holdout_mask) # remove held-out items
initial_label = holdout_mask.float() * initial_label + torch.logical_not(
holdout_mask).float() * 0.5 # label initialization
reset_masks.append(reset_mask)
entity_labels.append(initial_label)
# we do not need the reset_mask for the last iteration
reset_masks = reset_masks[:-1]
# label propagation
relation_vectors = [self.relation_embedding(i) for i in relations]
for i in range(self.n_iter):
entity_labels_next_iter = []
for hop in range(self.n_iter - i):
masks = reset_masks[hop]
self_labels = entity_labels[hop]
neighbor_labels = torch.reshape(entity_labels[hop + 1],
[self.batch_size, -1, self.neighbor_sample_size])
neighbor_relations = torch.reshape(relation_vectors[hop],
[self.batch_size, -1, self.neighbor_sample_size,
self.embedding_size])
# mix_neighbor_labels
user_embeddings = torch.reshape(user_embeddings,
[self.batch_size, 1, 1, self.embedding_size]) # [batch_size, 1, 1, dim]
user_relation_scores = torch.mean(user_embeddings * neighbor_relations,
dim=-1) # [batch_size, -1, n_neighbor]
user_relation_scores_normalized = self.softmax(
user_relation_scores) # [batch_size, -1, n_neighbor]
neighbors_aggregated_label = torch.mean(user_relation_scores_normalized * neighbor_labels,
dim=2) # [batch_size, -1, dim] # [batch_size, -1]
output = masks.float() * self_labels + torch.logical_not(masks).float() * \
neighbors_aggregated_label
entity_labels_next_iter.append(output)
entity_labels = entity_labels_next_iter
predicted_labels = entity_labels[0].squeeze(-1)
return predicted_labels
def forward(self, user, item):
self.batch_size = item.shape[0]
# [batch_size, dim]
user_e = self.user_embedding(user)
# entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items. dimensions of entities:
# {[batch_size, 1], [batch_size, n_neighbor], [batch_size, n_neighbor^2], ..., [batch_size, n_neighbor^n_iter]}
entities, relations = self.get_neighbors(item)
# [batch_size, dim]
item_e = self.aggregate(user_e, entities, relations)
return user_e, item_e
def calculate_ls_loss(self, user, item, target):
r"""Calculate label smoothness loss.
Args:
user(torch.FloatTensor): the index of users, shape: [batch_size*2],
item(torch.FloatTensor): the index of items, shape: [batch_size*2],
target(torch.FloatTensor): the label of user-item, shape: [batch_size*2],
Returns:
ls_loss: label smoothness loss
"""
user_e = self.user_embedding(user)
entities, relations = self.get_neighbors(item)
predicted_labels = self.label_smoothness_predict(
user_e, user, entities, relations)
ls_loss = self.bce_loss(predicted_labels, target)
return ls_loss
def calculate_loss(self, interaction):
user = interaction[self.USER_ID]
pos_item = interaction[self.ITEM_ID]
neg_item = interaction[self.NEG_ITEM_ID]
target = torch.zeros(
len(user) * 2, dtype=torch.float32).to(self.device)
target[:len(user)] = 1
users = torch.cat((user, user))
items = torch.cat((pos_item, neg_item))
user_e, item_e = self.forward(users, items)
predict = torch.mul(user_e, item_e).sum(dim=1)
rec_loss = self.bce_loss(predict, target)
ls_loss = self.calculate_ls_loss(users, items, target)
l2_loss = self.l2_loss(user_e, item_e)
loss = rec_loss + self.ls_weight * ls_loss + self.reg_weight * l2_loss
return loss
def predict(self, interaction):
user = interaction[self.USER_ID]
item = interaction[self.ITEM_ID]
user_e, item_e = self.forward(user, item)
return torch.mul(user_e, item_e).sum(dim=1)
def full_sort_predict(self, interaction):
user_index = interaction[self.USER_ID]
item_index = torch.tensor(range(self.n_items)).to(self.device)
user = torch.unsqueeze(user_index, dim=1).repeat(
1, item_index.shape[0])
user = torch.flatten(user)
item = torch.unsqueeze(item_index, dim=0).repeat(
user_index.shape[0], 1)
item = torch.flatten(item)
user_e, item_e = self.forward(user, item)
score = torch.mul(user_e, item_e).sum(dim=1)
return score.view(-1)
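# Illustrative note (added sketch, not part of the original model): get_interaction_table above
# packs a (user, item) pair into one integer key as user_id * 10**len(str(n_entities)) + item_id,
# so the item id occupies the low decimal digits. E.g. with n_entities = 2345 the offset is 10**4
# and user 7 with item 42 maps to key 70042.
_example_offset = 10 ** len(str(2345))
assert 7 * _example_offset + 42 == 70042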
|
load_data
|
generate the train and validation datasets; you can change this for your specific task
Args:
traindir (str): train dataset dir
valdir (str): validation dataset dir
Returns:
tuple: the train dataset and validation dataset
|
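A hedged usage sketch of the helper documented above, assuming the torchsat package is installed and the hypothetical data/train and data/val directories follow the layout ChangeDetectionDataset expects; it mirrors what main() in the script below does:
from torch.utils.data import DataLoader

train_ds, val_ds = load_data('data/train', 'data/val', extensions=['jpg'])
train_loader = DataLoader(train_ds, batch_size=16, shuffle=True)
val_loader = DataLoader(val_ds, batch_size=1, shuffle=False)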
import argparse
import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from ignite.metrics import IoU, Precision, Recall
import torchsat.transforms.transforms_cd as T
from torchsat.datasets.folder import ChangeDetectionDataset
from torchsat.models import FC_EF, FC_Siam_Conc, FC_Siam_Diff
def train_one_epoch(epoch, dataloader, model, criterion, optimizer, device, writer):
print('train epoch {}'.format(epoch))
model.train()
for idx, (pre_img, post_img, targets) in enumerate(dataloader):
pre_img, post_img, targets = pre_img.to(device), post_img.to(device), targets.to(device)
outputs = model(pre_img, post_img)
loss = criterion(outputs, targets)
optimizer.zero_grad()
loss.backward()
optimizer.step()
print('train-epoch:{} [{}/{}], loss: {:5.3}'.format(epoch, idx+1, len(dataloader), loss.item()))
writer.add_scalar('train/loss', loss.item(), len(dataloader)*epoch+idx)
def evalidation(epoch, dataloader, model, criterion, device, writer, tb_test_imgs):
print('\neval epoch {}'.format(epoch))
model.eval()
recall = Recall(lambda x: (x[0], x[1]))
precision = Precision(lambda x: (x[0], x[1]))
mean_recall = []
mean_precision = []
mean_loss = []
with torch.no_grad():
for idx, (pre_img, post_img, targets) in enumerate(dataloader):
pre_img, post_img, targets = pre_img.to(device), post_img.to(device), targets.to(device)
outputs = model(pre_img, post_img)
loss = criterion(outputs, targets)
preds = outputs.argmax(1)
precision.update((preds, targets))
recall.update((preds, targets))
mean_loss.append(loss.item())
mean_recall.append(recall.compute().item())
mean_precision.append(precision.compute().item())
# print('val-epoch:{} [{}/{}], loss: {:5.3}'.format(epoch, idx + 1, len(dataloader), loss.item()))
writer.add_scalar('test/loss', loss.item(), len(dataloader) * epoch + idx)
if idx < tb_test_imgs:
writer.add_image('test/pre', pre_img[0], idx)
writer.add_image('test/post', post_img[0], idx)
                writer.add_image('test/label', targets[0].unsqueeze(0), idx)  # 1 x H x W ground-truth mask
                writer.add_image('test/pred', preds[0].unsqueeze(0), idx)     # 1 x H x W predicted mask
mean_precision, mean_recall = np.array(mean_precision).mean(), np.array(mean_recall).mean()
f1 = mean_precision * mean_recall * 2 / (mean_precision + mean_recall + 1e-20)
print('precision: {:07.5}, recall: {:07.5}, f1: {:07.5}\n'.format(mean_precision, mean_recall, f1))
writer.add_scalar('test/epoch-loss', np.array(mean_loss).mean(), epoch)
writer.add_scalar('test/f1', f1, epoch)
writer.add_scalar('test/precision', mean_precision, epoch)
writer.add_scalar('test/recall', mean_recall, epoch)
# MASKED: load_data function (lines 72-96)
def main(args):
torch.backends.cudnn.benchmark = True
device = torch.device('cuda' if args.device == 'cuda' else 'cpu')
# dataset and dataloader
train_data, val_data = load_data(args.train_path, args.val_path, extensions=args.extensions)
train_loader = DataLoader(train_data, batch_size=args.batch_size, shuffle=True)
val_loader = DataLoader(val_data, batch_size=1, shuffle=False)
# model
# model = get_model(args.model, args.num_classes, pretrained=args.pretrained)
# model = FC_EF(num_classes=args.num_classes)
model = FC_Siam_Diff(num_classes=args.num_classes)
model.to(device)
if args.resume:
model.load_state_dict(torch.load(args.resume, map_location=device))
# TODO: resume learning rate
# loss
    criterion = nn.CrossEntropyLoss().to(device)
    # criterion = nn.BCELoss()  # disabled: targets are class-index masks, so CrossEntropyLoss is the appropriate loss
# optim and lr scheduler
optimizer = optim.Adam(model.parameters(), lr=args.lr)
lr_scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=10, T_mult=1, eta_min=1e-8)
# lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)
writer = SummaryWriter(args.ckp_dir)
for epoch in range(args.epochs):
writer.add_scalar('train/lr', lr_scheduler.get_lr()[0], epoch)
train_one_epoch(epoch, train_loader, model, criterion, optimizer, device, writer)
evalidation(epoch, val_loader, model, criterion, device, writer, args.tb_test_imgs)
lr_scheduler.step()
if epoch % 2 == 0:
torch.save(model.state_dict(), os.path.join(args.ckp_dir, 'cd_epoch_{}.pth'.format(epoch)))
def parse_args():
parser = argparse.ArgumentParser(description='TorchSat Change Detection Training Script')
parser.add_argument('--train-path', help='train dataset path')
parser.add_argument('--val-path', help='validate dataset path')
parser.add_argument('--extensions', nargs='+', default='jpg', help='the train image extension')
parser.add_argument('--model', default="unet34", help='model name. default, unet34')
parser.add_argument('--pretrained', default=True, help='use ImageNet pretrained params')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--num-classes', default=3, type=int, help='num of classes')
parser.add_argument('--in-channels', default=3, type=int, help='input image channels')
parser.add_argument('--device', default='cpu', help='device')
parser.add_argument('-b', '--batch-size', default=16, type=int, help='batch size')
parser.add_argument('--epochs', default=90, type=int, help='epochs')
parser.add_argument('--lr', default=0.01, type=float, help='initial learning rate')
parser.add_argument('--print-freq', default=10, type=int, help='print frequency')
parser.add_argument('--ckp-dir', default='./', help='path to save checkpoint')
parser.add_argument('--tb-test-imgs', default=10, help='the num of test image show in tensorboard')
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
main(args)
|
def load_data(traindir, valdir, **kwargs):
"""generate the train and val dataloader, you can change this for your specific task
Args:
traindir (str): train dataset dir
valdir (str): validation dataset dir
Returns:
tuple: the train dataset and validation dataset
"""
train_transform = T.Compose([
T.RandomCrop(512),
T.RandomHorizontalFlip(),
T.RandomVerticalFlip(),
T.ToTensor(),
T.Normalize(),
])
val_transform = T.Compose([
T.ToTensor(),
T.Normalize(),
])
dataset_train = ChangeDetectionDataset(traindir, extentions=kwargs['extensions'], transforms=train_transform, )
dataset_val = ChangeDetectionDataset(valdir, extentions=kwargs['extensions'], transforms=val_transform)
return dataset_train, dataset_val
| 72 | 96 |
import argparse
import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from ignite.metrics import IoU, Precision, Recall
import torchsat.transforms.transforms_cd as T
from torchsat.datasets.folder import ChangeDetectionDataset
from torchsat.models import FC_EF, FC_Siam_Conc, FC_Siam_Diff
def train_one_epoch(epoch, dataloader, model, criterion, optimizer, device, writer):
print('train epoch {}'.format(epoch))
model.train()
for idx, (pre_img, post_img, targets) in enumerate(dataloader):
pre_img, post_img, targets = pre_img.to(device), post_img.to(device), targets.to(device)
outputs = model(pre_img, post_img)
loss = criterion(outputs, targets)
optimizer.zero_grad()
loss.backward()
optimizer.step()
print('train-epoch:{} [{}/{}], loss: {:5.3}'.format(epoch, idx+1, len(dataloader), loss.item()))
writer.add_scalar('train/loss', loss.item(), len(dataloader)*epoch+idx)
def evalidation(epoch, dataloader, model, criterion, device, writer, tb_test_imgs):
print('\neval epoch {}'.format(epoch))
model.eval()
recall = Recall(lambda x: (x[0], x[1]))
precision = Precision(lambda x: (x[0], x[1]))
mean_recall = []
mean_precision = []
mean_loss = []
with torch.no_grad():
for idx, (pre_img, post_img, targets) in enumerate(dataloader):
pre_img, post_img, targets = pre_img.to(device), post_img.to(device), targets.to(device)
outputs = model(pre_img, post_img)
loss = criterion(outputs, targets)
preds = outputs.argmax(1)
precision.update((preds, targets))
recall.update((preds, targets))
mean_loss.append(loss.item())
mean_recall.append(recall.compute().item())
mean_precision.append(precision.compute().item())
# print('val-epoch:{} [{}/{}], loss: {:5.3}'.format(epoch, idx + 1, len(dataloader), loss.item()))
writer.add_scalar('test/loss', loss.item(), len(dataloader) * epoch + idx)
if idx < tb_test_imgs:
writer.add_image('test/pre', pre_img[0], idx)
writer.add_image('test/post', post_img[0], idx)
                writer.add_image('test/label', targets[0].unsqueeze(0), idx)  # 1 x H x W ground-truth mask
                writer.add_image('test/pred', preds[0].unsqueeze(0), idx)     # 1 x H x W predicted mask
mean_precision, mean_recall = np.array(mean_precision).mean(), np.array(mean_recall).mean()
f1 = mean_precision * mean_recall * 2 / (mean_precision + mean_recall + 1e-20)
print('precision: {:07.5}, recall: {:07.5}, f1: {:07.5}\n'.format(mean_precision, mean_recall, f1))
writer.add_scalar('test/epoch-loss', np.array(mean_loss).mean(), epoch)
writer.add_scalar('test/f1', f1, epoch)
writer.add_scalar('test/precision', mean_precision, epoch)
writer.add_scalar('test/recall', mean_recall, epoch)
def load_data(traindir, valdir, **kwargs):
"""generate the train and val dataloader, you can change this for your specific task
Args:
traindir (str): train dataset dir
valdir (str): validation dataset dir
Returns:
tuple: the train dataset and validation dataset
"""
train_transform = T.Compose([
T.RandomCrop(512),
T.RandomHorizontalFlip(),
T.RandomVerticalFlip(),
T.ToTensor(),
T.Normalize(),
])
val_transform = T.Compose([
T.ToTensor(),
T.Normalize(),
])
dataset_train = ChangeDetectionDataset(traindir, extentions=kwargs['extensions'], transforms=train_transform, )
dataset_val = ChangeDetectionDataset(valdir, extentions=kwargs['extensions'], transforms=val_transform)
return dataset_train, dataset_val
def main(args):
torch.backends.cudnn.benchmark = True
device = torch.device('cuda' if args.device == 'cuda' else 'cpu')
# dataset and dataloader
train_data, val_data = load_data(args.train_path, args.val_path, extensions=args.extensions)
train_loader = DataLoader(train_data, batch_size=args.batch_size, shuffle=True)
val_loader = DataLoader(val_data, batch_size=1, shuffle=False)
# model
# model = get_model(args.model, args.num_classes, pretrained=args.pretrained)
# model = FC_EF(num_classes=args.num_classes)
model = FC_Siam_Diff(num_classes=args.num_classes)
model.to(device)
if args.resume:
model.load_state_dict(torch.load(args.resume, map_location=device))
# TODO: resume learning rate
# loss
    criterion = nn.CrossEntropyLoss().to(device)
    # criterion = nn.BCELoss()  # disabled: targets are class-index masks, so CrossEntropyLoss is the appropriate loss
# optim and lr scheduler
optimizer = optim.Adam(model.parameters(), lr=args.lr)
lr_scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=10, T_mult=1, eta_min=1e-8)
# lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)
writer = SummaryWriter(args.ckp_dir)
for epoch in range(args.epochs):
writer.add_scalar('train/lr', lr_scheduler.get_lr()[0], epoch)
train_one_epoch(epoch, train_loader, model, criterion, optimizer, device, writer)
evalidation(epoch, val_loader, model, criterion, device, writer, args.tb_test_imgs)
lr_scheduler.step()
if epoch % 2 == 0:
torch.save(model.state_dict(), os.path.join(args.ckp_dir, 'cd_epoch_{}.pth'.format(epoch)))
def parse_args():
parser = argparse.ArgumentParser(description='TorchSat Change Detection Training Script')
parser.add_argument('--train-path', help='train dataset path')
parser.add_argument('--val-path', help='validate dataset path')
parser.add_argument('--extensions', nargs='+', default='jpg', help='the train image extension')
parser.add_argument('--model', default="unet34", help='model name. default, unet34')
parser.add_argument('--pretrained', default=True, help='use ImageNet pretrained params')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--num-classes', default=3, type=int, help='num of classes')
parser.add_argument('--in-channels', default=3, type=int, help='input image channels')
parser.add_argument('--device', default='cpu', help='device')
parser.add_argument('-b', '--batch-size', default=16, type=int, help='batch size')
parser.add_argument('--epochs', default=90, type=int, help='epochs')
parser.add_argument('--lr', default=0.01, type=float, help='initial learning rate')
parser.add_argument('--print-freq', default=10, type=int, help='print frequency')
parser.add_argument('--ckp-dir', default='./', help='path to save checkpoint')
parser.add_argument('--tb-test-imgs', default=10, help='the num of test image show in tensorboard')
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
main(args)
|
build_collective_reduce
|
Build a subgraph that does one full all-reduce, using the collective Op.
Args:
input_tensors: tensors within a single worker graph that are to be reduced
together; must be one per device.
num_workers: total number of workers with identical independent graphs that
will be doing this same reduction. The reduction will actually include
the corresponding tensors at all these workers.
collective_keys: a CollectiveKeys object.
reduction_op: string naming the reduction op.
unary_op: string naming the unary final op.
Returns:
An array of final tensors, one per device, computed by the full reduction.
Raises:
ValueError: There must be at least two tensors over all the workers.
|
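For intuition, here is a hedged pure-numpy sketch of the semantics of a full all-reduce (every device receives the same fully reduced tensor); it is a reference stand-in only, not the collective-op subgraph that build_collective_reduce actually constructs:
import numpy as np

def naive_all_reduce(per_device_tensors):
    # Reference semantics only: sum everything, give every device its own copy of the result.
    reduced = np.add.reduce(np.stack(per_device_tensors))
    return [reduced.copy() for _ in per_device_tensors]

# Two devices on each of two workers -> four inputs, four identical outputs.
inputs = [np.array([1., 2.]), np.array([3., 4.]), np.array([5., 6.]), np.array([7., 8.])]
outputs = naive_all_reduce(inputs)
assert all(np.array_equal(o, np.array([16., 20.])) for o in outputs)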
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for cross_device_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as pycoll
import threading
from tensorflow.python.distribute import all_reduce
from tensorflow.python.distribute import values as value_lib
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nccl_ops
def aggregate_gradients_using_nccl(replica_grads):
"""Aggregate gradients using nccl allreduce."""
agg_all_g_and_v = []
for single_g_and_v in zip(*replica_grads):
single_grads = [g for g, _ in single_g_and_v]
agg_grads = nccl_ops.all_sum(single_grads)
agg_all_g_and_v.append(
[(g, v) for g, (_, v) in zip(agg_grads, single_g_and_v)])
agg_all_g_and_v = list(zip(*agg_all_g_and_v))
return agg_all_g_and_v
def aggregate_gradients_using_hierarchical_copy(avail_devices, replica_grads):
"""Aggregate gradients using hierarchical copies.
Args:
avail_devices: available GPU devices.
replica_grads: List of lists of (gradient, variable) tuples. The outer list
is over replicas. The inner list is over individual gradients.
Returns:
The list of (aggregated_gradient, variable), where the gradient has been
summed across all replicas and the variable is chosen from the first
replica.
"""
# This only works for DGX-1 type of machine topology
# Device peer to peer matrix
# DMA: 0 1 2 3 4 5 6 7
# 0: Y Y Y Y Y N N N
# 1: Y Y Y Y N Y N N
# 2: Y Y Y Y N N Y N
# 3: Y Y Y Y N N N Y
# 4: Y N N N Y Y Y Y
# 5: N Y N N Y Y Y Y
# 6: N N Y N Y Y Y Y
# 7: N N N Y Y Y Y Y
agg_grads = []
num_devices = len(avail_devices)
# In the special case of DGX-1 machine topology, the two groups have equal
# size.
group_size = num_devices // 2
for i, single_grads in enumerate(zip(*replica_grads)):
group_0_main_device = i % num_devices
group_1_main_device = (group_0_main_device + group_size) % num_devices
if group_0_main_device < group_size:
group_0_begin = 0
group_1_begin = group_size
else:
group_0_begin = group_size
group_1_begin = 0
# Aggregate the first group.
group_0_device_grads = single_grads[group_0_begin:
group_0_begin + group_size]
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads, _ = aggregate_single_gradient_using_copy(
group_0_device_grads, False, False)
# Aggregate the second group.
group_1_device_grads = single_grads[group_1_begin:
group_1_begin + group_size]
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads, _ = aggregate_single_gradient_using_copy(
group_1_device_grads, False, False)
# Aggregate between the groups.
with ops.device(avail_devices[group_0_main_device]):
(agg_total_grads, _), _ = aggregate_single_gradient_using_copy(
[group_0_agg_grads, group_1_agg_grads], False, False)
# Broadcast the result back into the root of each group.
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads_bcast = array_ops.identity(agg_total_grads)
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads_bcast = array_ops.identity(agg_total_grads)
agg_grads_bcast = []
for j in range(len(single_grads)):
with ops.device(avail_devices[j]):
# Broadcast the result back to each member in the group from the root.
if (group_0_main_device < group_size) == (j < group_size):
src_device_grad = group_0_agg_grads_bcast
else:
src_device_grad = group_1_agg_grads_bcast
agg_grads_bcast.append(array_ops.identity(src_device_grad))
agg_grads.append(
[(g, v) for g, (_, v) in zip(agg_grads_bcast, single_grads)])
agg_grads = list(zip(*agg_grads))
return agg_grads
def aggregate_single_gradient_using_copy(grad_and_vars, use_mean,
check_inf_nan):
"""Calculate the average gradient for a shared variable across all replicas.
Note that this function provides a synchronization point across all replicas.
Args:
grad_and_vars: A list or tuple of (gradient, variable) tuples. Each
(gradient, variable) pair within the outer list represents the gradient
of the variable calculated for a single replica, and the number of pairs
equals the number of replicas.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all replicas. The variable is chosen
from the first replica. The has_nan_or_inf indicates the grads has nan or
inf.
"""
grads = [g for g, _ in grad_and_vars]
grad = math_ops.add_n(grads)
if use_mean and len(grads) > 1:
grad = array_ops.multiply(grad, 1.0 / len(grads))
v = grad_and_vars[0][1]
if check_inf_nan:
has_nan_or_inf = array_ops.logical_not(
array_ops.reduce_all(array_ops.is_finite(grads)))
return (grad, v), has_nan_or_inf
else:
return (grad, v), None
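# Hedged illustration (added, not TF code): the mean aggregation above for one shared variable,
# redone with plain Python lists. One gradient per replica, element-wise sum, then divide by the
# replica count when use_mean=True.
_replica_grads_example = [[1.0, 2.0], [3.0, 6.0]]
_averaged = [sum(gs) / len(_replica_grads_example) for gs in zip(*_replica_grads_example)]
assert _averaged == [2.0, 4.0]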
def group_device_names(devices, group_size):
"""Group device names into groups of group_size.
Args:
devices: a list of canonical device strings.
group_size: integer which is equal to or greater than 1.
Returns:
list of lists of devices, where each inner list is group_size long,
and each device appears at least once in an inner list. If
len(devices) % group_size == 0 then each device will appear exactly once.
Raises:
ValueError: if group_size > len(devices)
"""
num_devices = len(devices)
if group_size > num_devices:
raise ValueError(
'only %d devices, but group_size=%d' % (num_devices, group_size))
num_groups = (
num_devices // group_size + (1 if (num_devices % group_size != 0) else 0))
groups = [[] for i in range(num_groups)]
for i in range(num_groups * group_size):
groups[i % num_groups].append(devices[i % num_devices])
return groups
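# Worked example (illustrative addition): 3 devices grouped with group_size=2 give 2 groups,
# with device 0 reused round-robin to fill the final slot.
assert group_device_names(['/gpu:0', '/gpu:1', '/gpu:2'], 2) == [
    ['/gpu:0', '/gpu:2'], ['/gpu:1', '/gpu:0']]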
def split_grads_by_size(threshold_size, device_grads):
"""Break gradients into two sets according to tensor size.
Args:
threshold_size: int size cutoff for small vs large tensor.
device_grads: List of lists of (gradient, variable) tuples. The outer
list is over devices. The inner list is over individual gradients.
Returns:
small_grads: Subset of device_grads where shape is <= threshold_size
elements.
large_grads: Subset of device_grads where shape is > threshold_size
elements.
"""
small_grads = []
large_grads = []
for dl in device_grads:
small_dl = []
large_dl = []
for (g, v) in dl:
tensor_size = g.get_shape().num_elements()
if tensor_size <= threshold_size:
small_dl.append([g, v])
else:
large_dl.append([g, v])
if small_dl:
small_grads.append(small_dl)
if large_dl:
large_grads.append(large_dl)
return small_grads, large_grads
# threading.Lock() and threading.local() cannot be pickled and therefore cannot
# be a field of CollectiveKeys. Right now _thread_local is not necessary to be
# an instance member of CollectiveKeys since we always create a new thread for
# each replica.
_lock = threading.Lock()
_thread_local = threading.local()
# TODO(yuefengz): use random key starts to avoid reusing keys?
class CollectiveKeys(object):
"""Class that manages collective keys.
  We need to manage three different keys for collectives:
  *Group key*: an integer key that identifies the set of cooperating devices.
  Collective ops that run over the same set of devices must use the same group
  key.
  *Instance key*: an integer key that identifies the set of corresponding
  tensors on different devices in a device group that need to be all-reduced
  together.
  *Graph key*: an integer key that is unique per graph. This is used to support
  multiple graphs per client session. It must be non-zero and set in the
  `config` argument of each call to `session.run`.
"""
def __init__(self,
group_key_start=1,
instance_key_start=100,
instance_key_with_id_start=10000):
"""Initializes the object.
Args:
group_key_start: the starting integer of group key.
instance_key_start: the starting integer of instance key.
instance_key_with_id_start: the starting integer of instance key that is
recorded with an id.
"""
self._group_key = group_key_start
self._group_key_table = dict()
# For instance keys with ids
self._instance_key_id_to_key_table = dict()
self._instance_key_with_id_counter = instance_key_with_id_start
# For instance keys without ids
self._instance_key_start = instance_key_start
def _get_thread_local_object(self):
# We make instance key without key ids thread local so that it will work
# with MirroredStrategy and distribute coordinator.
if not hasattr(_thread_local, 'instance_key'):
_thread_local.instance_key = self._instance_key_start
return _thread_local
def get_group_key(self, devices):
"""Returns a group key for the set of devices.
Args:
devices: list of strings naming devices in a collective group.
Returns:
int key uniquely identifying the set of device names.
"""
parsed = [pydev.DeviceSpec.from_string(d) for d in devices]
# In the between-graph replicated training, different workers need to get
# the same device key. So we remove the task_type and task_id from the
# devices.
# TODO(yuefengz): in the in-graph replicated training, we need to include
# task_type and task_id.
names = sorted(['%s:%d' % (d.device_type, d.device_index) for d in parsed])
key_id = ','.join(names)
with _lock:
if key_id not in self._group_key_table:
new_key = self._group_key
self._group_key += 1
self._group_key_table[key_id] = new_key
return self._group_key_table[key_id]
def get_instance_key(self, key_id=None):
"""Returns a new instance key for use in defining a collective op.
Args:
key_id: optional string. If set, key will be recorded and the same key
will be returned when the same key_id is provided. If not, an increasing
instance key will be returned.
"""
if key_id:
with _lock:
if key_id not in self._instance_key_id_to_key_table:
self._instance_key_with_id_counter += 1
self._instance_key_id_to_key_table[key_id] = (
self._instance_key_with_id_counter)
return self._instance_key_id_to_key_table[key_id]
else:
v = self._get_thread_local_object().instance_key
self._get_thread_local_object().instance_key += 1
return v
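# Hedged usage sketch for the key scheme documented above (illustrative addition, not TF code).
# Group keys: workers asking for their own '/job:worker/task:<i>/device:GPU:0' all receive the
# same integer, because task_type/task_id are stripped in get_group_key before the lookup.
# Instance keys: a named key is stable across calls, an unnamed one keeps increasing.
_example_keys = CollectiveKeys()
assert _example_keys.get_instance_key('v0') == _example_keys.get_instance_key('v0')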
# MASKED: build_collective_reduce function (lines 326-364)
def sum_grad_and_var_all_reduce(grad_and_vars,
num_workers,
alg,
gpu_indices,
aux_devices=None,
num_shards=1):
"""Apply all-reduce algorithm over specified gradient tensors."""
with ops.name_scope('allreduce'):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
scaled_grads = [g for g, _ in grad_and_vars]
if alg == 'nccl':
summed_grads = nccl_ops.all_sum(scaled_grads)
elif alg == 'xring':
summed_grads = all_reduce.build_ring_all_reduce(
scaled_grads, num_workers, num_shards, gpu_indices, math_ops.add)
elif alg == 'nccl/xring':
summed_grads = all_reduce.build_nccl_then_ring(scaled_grads, num_shards,
math_ops.add)
elif alg == 'nccl/rechd':
summed_grads = all_reduce.build_nccl_then_recursive_hd(
scaled_grads, math_ops.add)
elif alg == 'nccl/pscpu':
summed_grads = all_reduce.build_nccl_then_shuffle(
scaled_grads, aux_devices, math_ops.add, math_ops.add_n)
elif alg == 'pscpu/pscpu':
second_gather_devices = aux_devices[:num_shards]
summed_grads = all_reduce.build_shuffle_then_shuffle(
scaled_grads, aux_devices, second_gather_devices, math_ops.add_n)
elif alg in ['pscpu', 'psgpu']:
summed_grads = all_reduce.build_shuffle_all_reduce(
scaled_grads, aux_devices, math_ops.add_n)
else:
raise ValueError('unsupported all_reduce alg: ', alg)
result = []
for (_, v), g in zip(grad_and_vars, summed_grads):
result.append([g, v])
return result
def sum_gradients_all_reduce(dev_prefixes, replica_grads, num_workers, alg,
num_shards, gpu_indices):
"""Apply all-reduce algorithm over specified gradient tensors.
Args:
dev_prefixes: list of prefix strings to use to generate PS device names.
replica_grads: the gradients to reduce.
num_workers: number of worker processes across entire job.
alg: the all-reduce algorithm to apply.
num_shards: alg-specific sharding factor.
gpu_indices: indices of local GPUs in order usable for ring-reduce.
Returns:
list of reduced tensors
"""
alg_contains_shuffle = any([n in alg for n in ['pscpu', 'psgpu']])
is_hierarchical = '/' in alg
if 'pscpu' in alg:
aux_devices = [prefix + '/cpu:0' for prefix in dev_prefixes]
elif 'psgpu' in alg:
aux_devices = [
prefix + '/gpu:%d' % i
for i in range(len(gpu_indices))
for prefix in dev_prefixes
]
else:
aux_devices = ['/job:localhost/cpu:0']
# Auxiliary devices for hierarchical all-reduces.
aux_device_groups = group_device_names(
aux_devices, num_shards if alg_contains_shuffle else 1)
group_index = 0
reduced_gv_list = []
for grad_and_vars in zip(*replica_grads):
reduced_gv_list.append(
sum_grad_and_var_all_reduce(
grad_and_vars, num_workers, alg, gpu_indices, aux_devices
if is_hierarchical else aux_device_groups[group_index], num_shards))
group_index = (group_index + 1) % len(aux_device_groups)
new_replica_grads = [list(x) for x in zip(*reduced_gv_list)]
return new_replica_grads
def extract_ranges(index_list, range_size_limit=32):
"""Extract consecutive ranges and singles from index_list.
Args:
index_list: List of monotone increasing non-negative integers.
range_size_limit: Largest size range to return. If a larger
consecutive range exists, it will be returned as multiple
ranges.
Returns:
(ranges, singles) where ranges is a list of [first, last] pairs of
consecutive elements in index_list, and singles is all of the
other elements, in original order.
"""
if not index_list:
return [], []
first = index_list[0]
last = first
ranges = []
singles = []
for i in index_list[1:]:
if i == last + 1 and (last - first) <= range_size_limit:
last = i
else:
if last > first:
ranges.append([first, last])
else:
singles.append(first)
first = i
last = i
if last > first:
ranges.append([first, last])
else:
singles.append(first)
return ranges, singles
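# Illustrative usage sketch: consecutive indices collapse into [first, last]
# pairs and isolated indices are reported separately, e.g.
#
#   extract_ranges([1, 2, 3, 5, 7, 8])  # -> ([[1, 3], [7, 8]], [5])
#   extract_ranges([])                  # -> ([], [])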
GradPackTuple = pycoll.namedtuple('GradPackTuple', 'indices vars shapes')
def pack_range(key, packing, grad_vars, rng):
"""Form the concatenation of a specified range of gradient tensors.
Args:
key: Value under which to store meta-data in packing that will be used
later to restore the grad_var list structure.
packing: Dict holding data describing packed ranges of small tensors.
grad_vars: List of (grad, var) pairs for one replica.
rng: A pair of integers giving the first, last indices of a consecutive
range of tensors to be packed.
Returns:
A tensor that is the concatenation of all the specified small tensors.
"""
to_pack = grad_vars[rng[0]:rng[1] + 1]
members = []
variables = []
restore_shapes = []
with ops.name_scope('pack'):
for g, v in to_pack:
variables.append(v)
restore_shapes.append(g.shape)
with ops.device(g.device):
members.append(array_ops.reshape(g, [-1]))
packing[key] = GradPackTuple(
indices=range(rng[0], rng[1] + 1),
vars=variables,
shapes=restore_shapes)
with ops.device(members[0].device):
return array_ops.concat(members, 0)
def unpack_grad_tuple(gv, gpt):
"""Unpack a previously packed collection of gradient tensors.
Args:
gv: A (grad, var) pair to be unpacked.
gpt: A GradPackTuple describing the packing operation that produced gv.
Returns:
A list of (grad, var) pairs corresponding to the values that were
originally packed into gv, maybe following subsequent operations like
reduction.
"""
elt_widths = [x.num_elements() for x in gpt.shapes]
with ops.device(gv[0][0].device):
with ops.name_scope('unpack'):
splits = array_ops.split(gv[0], elt_widths)
unpacked_gv = []
for idx, s in enumerate(splits):
unpacked_gv.append((array_ops.reshape(s, gpt.shapes[idx]),
gpt.vars[idx]))
return unpacked_gv
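# Illustrative sketch (v3..v5 and s3..s5 are hypothetical variables/shapes):
# if pack_range stored gradients 3..5 under key '0:0', then packing['0:0'] is
# GradPackTuple(indices=range(3, 6), vars=[v3, v4, v5], shapes=[s3, s4, s5]),
# and unpack_grad_tuple splits the concatenated gradient back into tensors
# reshaped to s3, s4, s5, each re-paired with its original variable.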
def pack_small_tensors(replica_grads, max_bytes=0, max_group=0):
"""Concatenate small gradient tensors together for reduction.
Args:
replica_grads: List of lists of (gradient, variable) tuples.
max_bytes: Int giving max number of bytes in a tensor that
may be considered small.
max_group: Int giving max number of small tensors that may be
concatenated into one new tensor.
Returns:
new_replica_grads, packing where new_replica_grads is identical to
replica_grads except that all feasible small_tensors have been removed
from their places and concatenated into larger tensors that are
now in the front of the list for each replica, and packing contains
the data necessary to restore the replica_grads structure.
Look through the first replica for gradients of the same type (float),
and small size, that are all sequential. For each such group,
replace by a new tensor that is a flattened concatenation. Note
that the corresponding variable will be absent, which doesn't matter
because it isn't used during all-reduce.
Requires:
Every gv_list in replicas must have isomorphic structure including identical
tensor sizes and types.
"""
small_indices = []
large_indices = []
for idx, (g, _) in enumerate(replica_grads[0]):
if g.dtype == dtypes.float32 and (4 * g.shape.num_elements()) <= max_bytes:
small_indices.append(idx)
else:
large_indices.append(idx)
small_ranges, small_singles = extract_ranges(
small_indices, range_size_limit=max_group)
large_indices = sorted(large_indices + small_singles)
num_gv = len(replica_grads[0])
packing = {}
if small_ranges:
new_replica_grads = []
for dev_idx, gv_list in enumerate(replica_grads):
assert len(gv_list) == num_gv
new_gv_list = []
for r in small_ranges:
key = '%d:%d' % (dev_idx, len(new_gv_list))
new_gv_list.append((pack_range(key, packing, gv_list, r),
'packing_var_placeholder'))
for i in large_indices:
new_gv_list.append(gv_list[i])
new_replica_grads.append(new_gv_list)
return new_replica_grads, packing
else:
return replica_grads, None
def unpack_small_tensors(replica_grads, packing):
"""Undo the structure alterations to replica_grads done by pack_small_tensors.
Args:
replica_grads: List of List of (grad, var) tuples.
packing: A dict generated by pack_small_tensors describing the changes
it made to replica_grads.
Returns:
new_replica_grads: identical to replica_grads except that concatenations
of small tensors have been split apart and returned to their original
positions, paired with their original variables.
"""
if not packing:
return replica_grads
new_replica_grads = []
num_devices = len(replica_grads)
num_packed = len(packing.keys()) // num_devices
for dev_idx, gv_list in enumerate(replica_grads):
gv_list = list(gv_list)
new_gv_list = gv_list[num_packed:]
for i in range(num_packed):
k = '%d:%d' % (dev_idx, i)
gpt = packing[k]
gv = unpack_grad_tuple(gv_list[i], gpt)
for gi, idx in enumerate(gpt.indices):
assert idx == gpt.indices[gi]
new_gv_list.insert(idx, gv[gi])
new_replica_grads.append(new_gv_list)
return new_replica_grads
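# Illustrative round-trip sketch (replica_grads, do_all_reduce and the
# max_bytes/max_group values below are assumptions, not fixed by this module):
#
#   packed, packing = pack_small_tensors(replica_grads,
#                                        max_bytes=1 << 20, max_group=16)
#   reduced_packed = do_all_reduce(packed)
#   reduced = unpack_small_tensors(reduced_packed, packing)
#
# When no gradient qualifies as small, packing is None and the input list is
# returned unchanged, so the unpack step is a no-op.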
def aggregate_tensors_or_indexed_slices(values, accumulation_fn=math_ops.add_n):
"""Aggregate tensors using `accumulation_fn` and IndexedSlices via concat."""
if any(isinstance(v, ops.IndexedSlices) for v in values):
return gradients_impl._AggregateIndexedSlicesGradients(values) # pylint: disable=protected-access
else:
return accumulation_fn(values)
def divide_by_n_tensors_or_indexed_slices(value, n):
if isinstance(value, ops.IndexedSlices):
value = gradients_impl._HandleNestedIndexedSlices(value) # pylint: disable=protected-access
return ops.IndexedSlices(
value.values / n, value.indices, value.dense_shape)
else:
return value / n
def copy_tensor_or_indexed_slices_to_device(value, device):
with ops.device(device):
if isinstance(value, ops.IndexedSlices):
copied_values = array_ops.identity(value.values)
copied_indices = array_ops.identity(value.indices)
copied_shape = array_ops.identity(value.dense_shape)
result = ops.IndexedSlices(copied_values, copied_indices, copied_shape)
else:
result = array_ops.identity(value)
return result
def contains_indexed_slices(value):
"""Check whether the value is `IndexedSlices` or contains `IndexedSlices`."""
if isinstance(value, ops.IndexedSlices):
return True
elif isinstance(value, (list, tuple)) and value:
return any(contains_indexed_slices(v) for v in value)
elif isinstance(value, value_lib.DistributedValues):
return contains_indexed_slices(list(value._index.values())) # pylint: disable=protected-access
else:
return False
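# Illustrative usage sketch (per_replica_values is a hypothetical list that
# may mix dense tensors and ops.IndexedSlices from sparse gradients):
#
#   total = aggregate_tensors_or_indexed_slices(per_replica_values)
#   mean = divide_by_n_tensors_or_indexed_slices(total, len(per_replica_values))
#   local = copy_tensor_or_indexed_slices_to_device(mean, '/device:GPU:0')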
|
def build_collective_reduce(input_tensors,
num_workers,
collective_keys,
reduction_op='Add',
unary_op='Id'):
"""Build a subgraph that does one full all-reduce, using the collective Op.
Args:
input_tensors: tensors within a single worker graph that are to be reduced
together; must be one per device.
num_workers: total number of workers with identical independent graphs that
will be doing this same reduction. The reduction will actually include
the corresponding tensors at all these workers.
collective_keys: a CollectiveKeys object.
reduction_op: string naming the reduction op.
unary_op: string naming the unary final op.
Returns:
An array of final tensors, one per device, computed by the full reduction.
Raises:
ValueError: There must be at least two tensors over all the workers.
"""
group_size = len(input_tensors) * num_workers
if group_size < 2:
raise ValueError('num_workers * len(input_tensors) must be 2 or greater')
devices = [t.device for t in input_tensors]
num_devices = len(devices)
group_key = collective_keys.get_group_key(devices)
instance_key = collective_keys.get_instance_key()
out_tensors = []
subdiv_offsets = [0] # TODO(tucker): maybe support non-default subdiv spec
for d in range(num_devices):
with ops.device(devices[d]):
reduce_op = collective_ops.all_reduce(
input_tensors[d], group_size, group_key, instance_key, reduction_op,
unary_op, subdiv_offsets)
out_tensors.append(reduce_op)
return out_tensors
| 326 | 364 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for cross_device_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as pycoll
import threading
from tensorflow.python.distribute import all_reduce
from tensorflow.python.distribute import values as value_lib
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nccl_ops
def aggregate_gradients_using_nccl(replica_grads):
"""Aggregate gradients using nccl allreduce."""
agg_all_g_and_v = []
for single_g_and_v in zip(*replica_grads):
single_grads = [g for g, _ in single_g_and_v]
agg_grads = nccl_ops.all_sum(single_grads)
agg_all_g_and_v.append(
[(g, v) for g, (_, v) in zip(agg_grads, single_g_and_v)])
agg_all_g_and_v = list(zip(*agg_all_g_and_v))
return agg_all_g_and_v
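# Data-layout note (illustrative; the g*/v* names below are hypothetical):
# replica_grads is indexed [replica][variable], e.g. for 2 replicas and 2
# variables
#
#   replica_grads = [[(g0_r0, v0), (g1_r0, v1)],
#                    [(g0_r1, v0), (g1_r1, v1)]]
#
# so zip(*replica_grads) yields one variable at a time across replicas; each
# group of per-replica gradients is summed with nccl_ops.all_sum and the
# result is transposed back to the original [replica][variable] layout.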
def aggregate_gradients_using_hierarchical_copy(avail_devices, replica_grads):
"""Aggregate gradients using hierarchical copies.
Args:
avail_devices: available GPU devices.
replica_grads: List of lists of (gradient, variable) tuples. The outer list
is over replicas. The inner list is over individual gradients.
Returns:
The list of (aggregated_gradient, variable), where the gradient has been
summed across all replicas and the variable is chosen from the first
replica.
"""
# This only works for DGX-1 type of machine topology
# Device peer to peer matrix
# DMA: 0 1 2 3 4 5 6 7
# 0: Y Y Y Y Y N N N
# 1: Y Y Y Y N Y N N
# 2: Y Y Y Y N N Y N
# 3: Y Y Y Y N N N Y
# 4: Y N N N Y Y Y Y
# 5: N Y N N Y Y Y Y
# 6: N N Y N Y Y Y Y
# 7: N N N Y Y Y Y Y
agg_grads = []
num_devices = len(avail_devices)
# In the special case of DGX-1 machine topology, the two groups have equal
# size.
group_size = num_devices // 2
for i, single_grads in enumerate(zip(*replica_grads)):
group_0_main_device = i % num_devices
group_1_main_device = (group_0_main_device + group_size) % num_devices
if group_0_main_device < group_size:
group_0_begin = 0
group_1_begin = group_size
else:
group_0_begin = group_size
group_1_begin = 0
# Aggregate the first group.
group_0_device_grads = single_grads[group_0_begin:
group_0_begin + group_size]
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads, _ = aggregate_single_gradient_using_copy(
group_0_device_grads, False, False)
# Aggregate the second group.
group_1_device_grads = single_grads[group_1_begin:
group_1_begin + group_size]
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads, _ = aggregate_single_gradient_using_copy(
group_1_device_grads, False, False)
# Aggregate between the groups.
with ops.device(avail_devices[group_0_main_device]):
(agg_total_grads, _), _ = aggregate_single_gradient_using_copy(
[group_0_agg_grads, group_1_agg_grads], False, False)
# Broadcast the result back into the root of each group.
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads_bcast = array_ops.identity(agg_total_grads)
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads_bcast = array_ops.identity(agg_total_grads)
agg_grads_bcast = []
for j in range(len(single_grads)):
with ops.device(avail_devices[j]):
# Broadcast the result back to each member in the group from the root.
if (group_0_main_device < group_size) == (j < group_size):
src_device_grad = group_0_agg_grads_bcast
else:
src_device_grad = group_1_agg_grads_bcast
agg_grads_bcast.append(array_ops.identity(src_device_grad))
agg_grads.append(
[(g, v) for g, (_, v) in zip(agg_grads_bcast, single_grads)])
agg_grads = list(zip(*agg_grads))
return agg_grads
def aggregate_single_gradient_using_copy(grad_and_vars, use_mean,
check_inf_nan):
"""Calculate the average gradient for a shared variable across all replicas.
Note that this function provides a synchronization point across all replicas.
Args:
grad_and_vars: A list or tuple of (gradient, variable) tuples. Each
(gradient, variable) pair within the outer list represents the gradient
of the variable calculated for a single replica, and the number of pairs
equals the number of replicas.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all replicas. The variable is chosen
    from the first replica. The has_nan_or_inf indicates whether any of the
    gradients contains a nan or inf.
"""
grads = [g for g, _ in grad_and_vars]
grad = math_ops.add_n(grads)
if use_mean and len(grads) > 1:
grad = array_ops.multiply(grad, 1.0 / len(grads))
v = grad_and_vars[0][1]
if check_inf_nan:
has_nan_or_inf = array_ops.logical_not(
array_ops.reduce_all(array_ops.is_finite(grads)))
return (grad, v), has_nan_or_inf
else:
return (grad, v), None
def group_device_names(devices, group_size):
"""Group device names into groups of group_size.
Args:
devices: a list of canonical device strings.
group_size: integer which is equal to or greater than 1.
Returns:
list of lists of devices, where each inner list is group_size long,
and each device appears at least once in an inner list. If
len(devices) % group_size == 0 then each device will appear exactly once.
Raises:
ValueError: if group_size > len(devices)
"""
num_devices = len(devices)
if group_size > num_devices:
raise ValueError(
'only %d devices, but group_size=%d' % (num_devices, group_size))
num_groups = (
num_devices // group_size + (1 if (num_devices % group_size != 0) else 0))
groups = [[] for i in range(num_groups)]
for i in range(num_groups * group_size):
groups[i % num_groups].append(devices[i % num_devices])
return groups
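# Worked example (device names are placeholders):
#
#   group_device_names(['/gpu:0', '/gpu:1', '/gpu:2'], 2)
#   # -> [['/gpu:0', '/gpu:2'], ['/gpu:1', '/gpu:0']]
#
# num_groups is 2 here, devices are assigned round-robin, and '/gpu:0' is
# reused so that every group reaches group_size.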
def split_grads_by_size(threshold_size, device_grads):
"""Break gradients into two sets according to tensor size.
Args:
threshold_size: int size cutoff for small vs large tensor.
device_grads: List of lists of (gradient, variable) tuples. The outer
list is over devices. The inner list is over individual gradients.
Returns:
small_grads: Subset of device_grads where shape is <= threshold_size
elements.
large_grads: Subset of device_grads where shape is > threshold_size
elements.
"""
small_grads = []
large_grads = []
for dl in device_grads:
small_dl = []
large_dl = []
for (g, v) in dl:
tensor_size = g.get_shape().num_elements()
if tensor_size <= threshold_size:
small_dl.append([g, v])
else:
large_dl.append([g, v])
if small_dl:
small_grads.append(small_dl)
if large_dl:
large_grads.append(large_dl)
return small_grads, large_grads
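# Illustrative note: the threshold is in elements, not bytes. With
# threshold_size=1000, a [10, 10] gradient (100 elements) lands in
# small_grads while a [1000, 10] gradient (10000 elements) lands in
# large_grads.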
# threading.Lock() and threading.local() cannot be pickled and therefore cannot
# be a field of CollectiveKeys. Right now _thread_local is not necessary to be
# an instance member of CollectiveKeys since we always create a new thread for
# each replica.
_lock = threading.Lock()
_thread_local = threading.local()
# TODO(yuefengz): use random key starts to avoid reusing keys?
class CollectiveKeys(object):
"""Class that manages collective keys.
  We need to manage three different keys for collective:
  *Group key*: an integer key to identify the set of cooperative devices.
  Collective ops running on the same set of devices must use the same group
  key.
  *Instance key*: an integer key to identify the set of corresponding tensors
  on different devices in a device group that need to be all-reduced.
  *Graph key*: an integer key that is unique per graph. This is used to support
  multiple graphs per client session. It must be non-zero and set in the
  `config` argument of each call to `session.run`.
"""
def __init__(self,
group_key_start=1,
instance_key_start=100,
instance_key_with_id_start=10000):
"""Initializes the object.
Args:
group_key_start: the starting integer of group key.
instance_key_start: the starting integer of instance key.
instance_key_with_id_start: the starting integer of instance key that is
recorded with an id.
"""
self._group_key = group_key_start
self._group_key_table = dict()
# For instance keys with ids
self._instance_key_id_to_key_table = dict()
self._instance_key_with_id_counter = instance_key_with_id_start
# For instance keys without ids
self._instance_key_start = instance_key_start
def _get_thread_local_object(self):
# We make instance key without key ids thread local so that it will work
# with MirroredStrategy and distribute coordinator.
if not hasattr(_thread_local, 'instance_key'):
_thread_local.instance_key = self._instance_key_start
return _thread_local
def get_group_key(self, devices):
"""Returns a group key for the set of devices.
Args:
devices: list of strings naming devices in a collective group.
Returns:
int key uniquely identifying the set of device names.
"""
parsed = [pydev.DeviceSpec.from_string(d) for d in devices]
# In the between-graph replicated training, different workers need to get
# the same device key. So we remove the task_type and task_id from the
# devices.
# TODO(yuefengz): in the in-graph replicated training, we need to include
# task_type and task_id.
names = sorted(['%s:%d' % (d.device_type, d.device_index) for d in parsed])
key_id = ','.join(names)
with _lock:
if key_id not in self._group_key_table:
new_key = self._group_key
self._group_key += 1
self._group_key_table[key_id] = new_key
return self._group_key_table[key_id]
def get_instance_key(self, key_id=None):
"""Returns a new instance key for use in defining a collective op.
Args:
key_id: optional string. If set, key will be recorded and the same key
will be returned when the same key_id is provided. If not, an increasing
instance key will be returned.
"""
if key_id:
with _lock:
if key_id not in self._instance_key_id_to_key_table:
self._instance_key_with_id_counter += 1
self._instance_key_id_to_key_table[key_id] = (
self._instance_key_with_id_counter)
return self._instance_key_id_to_key_table[key_id]
else:
v = self._get_thread_local_object().instance_key
self._get_thread_local_object().instance_key += 1
return v
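# Illustrative usage sketch (the worker device strings are assumptions):
#
#   collective_keys = CollectiveKeys()
#   group_key = collective_keys.get_group_key(
#       ['/job:worker/replica:0/task:0/device:GPU:0',
#        '/job:worker/replica:0/task:1/device:GPU:0'])
#   instance_key = collective_keys.get_instance_key()
#
# Both device strings reduce to the name 'GPU:0', so between-graph workers
# obtain the same group key for corresponding device sets.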
def build_collective_reduce(input_tensors,
num_workers,
collective_keys,
reduction_op='Add',
unary_op='Id'):
"""Build a subgraph that does one full all-reduce, using the collective Op.
Args:
input_tensors: tensors within a single worker graph that are to be reduced
together; must be one per device.
num_workers: total number of workers with identical independent graphs that
will be doing this same reduction. The reduction will actually include
the corresponding tensors at all these workers.
collective_keys: a CollectiveKeys object.
reduction_op: string naming the reduction op.
unary_op: string naming the unary final op.
Returns:
An array of final tensors, one per device, computed by the full reduction.
Raises:
ValueError: There must be at least two tensors over all the workers.
"""
group_size = len(input_tensors) * num_workers
if group_size < 2:
raise ValueError('num_workers * len(input_tensors) must be 2 or greater')
devices = [t.device for t in input_tensors]
num_devices = len(devices)
group_key = collective_keys.get_group_key(devices)
instance_key = collective_keys.get_instance_key()
out_tensors = []
subdiv_offsets = [0] # TODO(tucker): maybe support non-default subdiv spec
for d in range(num_devices):
with ops.device(devices[d]):
reduce_op = collective_ops.all_reduce(
input_tensors[d], group_size, group_key, instance_key, reduction_op,
unary_op, subdiv_offsets)
out_tensors.append(reduce_op)
return out_tensors
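# Illustrative usage sketch (tensor_gpu0/tensor_gpu1 are hypothetical tensors
# already placed on distinct local devices):
#
#   keys = CollectiveKeys()
#   reduced = build_collective_reduce(
#       [tensor_gpu0, tensor_gpu1], num_workers=2, collective_keys=keys,
#       reduction_op='Add', unary_op='Id')
#
# Each worker must build an identical subgraph so that group_size
# (len(input_tensors) * num_workers) and the generated keys match.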
def sum_grad_and_var_all_reduce(grad_and_vars,
num_workers,
alg,
gpu_indices,
aux_devices=None,
num_shards=1):
"""Apply all-reduce algorithm over specified gradient tensors."""
with ops.name_scope('allreduce'):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
scaled_grads = [g for g, _ in grad_and_vars]
if alg == 'nccl':
summed_grads = nccl_ops.all_sum(scaled_grads)
elif alg == 'xring':
summed_grads = all_reduce.build_ring_all_reduce(
scaled_grads, num_workers, num_shards, gpu_indices, math_ops.add)
elif alg == 'nccl/xring':
summed_grads = all_reduce.build_nccl_then_ring(scaled_grads, num_shards,
math_ops.add)
elif alg == 'nccl/rechd':
summed_grads = all_reduce.build_nccl_then_recursive_hd(
scaled_grads, math_ops.add)
elif alg == 'nccl/pscpu':
summed_grads = all_reduce.build_nccl_then_shuffle(
scaled_grads, aux_devices, math_ops.add, math_ops.add_n)
elif alg == 'pscpu/pscpu':
second_gather_devices = aux_devices[:num_shards]
summed_grads = all_reduce.build_shuffle_then_shuffle(
scaled_grads, aux_devices, second_gather_devices, math_ops.add_n)
elif alg in ['pscpu', 'psgpu']:
summed_grads = all_reduce.build_shuffle_all_reduce(
scaled_grads, aux_devices, math_ops.add_n)
else:
      raise ValueError('unsupported all_reduce alg: %s' % alg)
result = []
for (_, v), g in zip(grad_and_vars, summed_grads):
result.append([g, v])
return result
def sum_gradients_all_reduce(dev_prefixes, replica_grads, num_workers, alg,
num_shards, gpu_indices):
"""Apply all-reduce algorithm over specified gradient tensors.
Args:
dev_prefixes: list of prefix strings to use to generate PS device names.
replica_grads: the gradients to reduce.
num_workers: number of worker processes across entire job.
alg: the all-reduce algorithm to apply.
num_shards: alg-specific sharding factor.
gpu_indices: indices of local GPUs in order usable for ring-reduce.
Returns:
list of reduced tensors
"""
alg_contains_shuffle = any([n in alg for n in ['pscpu', 'psgpu']])
is_hierarchical = '/' in alg
if 'pscpu' in alg:
aux_devices = [prefix + '/cpu:0' for prefix in dev_prefixes]
elif 'psgpu' in alg:
aux_devices = [
prefix + '/gpu:%d' % i
for i in range(len(gpu_indices))
for prefix in dev_prefixes
]
else:
aux_devices = ['/job:localhost/cpu:0']
# Auxiliary devices for hierarchical all-reduces.
aux_device_groups = group_device_names(
aux_devices, num_shards if alg_contains_shuffle else 1)
group_index = 0
reduced_gv_list = []
for grad_and_vars in zip(*replica_grads):
reduced_gv_list.append(
sum_grad_and_var_all_reduce(
grad_and_vars, num_workers, alg, gpu_indices, aux_devices
if is_hierarchical else aux_device_groups[group_index], num_shards))
group_index = (group_index + 1) % len(aux_device_groups)
new_replica_grads = [list(x) for x in zip(*reduced_gv_list)]
return new_replica_grads
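# Illustrative note (the dev_prefixes below are placeholders): for a
# hierarchical algorithm such as 'nccl/pscpu' with dev_prefixes
# ['/job:worker/task:0', '/job:worker/task:1'], aux_devices becomes
# ['/job:worker/task:0/cpu:0', '/job:worker/task:1/cpu:0'] and is passed to
# every per-variable reduction, while a flat 'pscpu' run instead rotates
# through aux_device_groups of num_shards devices each.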
def extract_ranges(index_list, range_size_limit=32):
"""Extract consecutive ranges and singles from index_list.
Args:
index_list: List of monotone increasing non-negative integers.
range_size_limit: Largest size range to return. If a larger
consecutive range exists, it will be returned as multiple
ranges.
Returns:
(ranges, singles) where ranges is a list of [first, last] pairs of
consecutive elements in index_list, and singles is all of the
other elements, in original order.
"""
if not index_list:
return [], []
first = index_list[0]
last = first
ranges = []
singles = []
for i in index_list[1:]:
if i == last + 1 and (last - first) <= range_size_limit:
last = i
else:
if last > first:
ranges.append([first, last])
else:
singles.append(first)
first = i
last = i
if last > first:
ranges.append([first, last])
else:
singles.append(first)
return ranges, singles
GradPackTuple = pycoll.namedtuple('GradPackTuple', 'indices vars shapes')
def pack_range(key, packing, grad_vars, rng):
"""Form the concatenation of a specified range of gradient tensors.
Args:
key: Value under which to store meta-data in packing that will be used
later to restore the grad_var list structure.
packing: Dict holding data describing packed ranges of small tensors.
grad_vars: List of (grad, var) pairs for one replica.
rng: A pair of integers giving the first, last indices of a consecutive
range of tensors to be packed.
Returns:
A tensor that is the concatenation of all the specified small tensors.
"""
to_pack = grad_vars[rng[0]:rng[1] + 1]
members = []
variables = []
restore_shapes = []
with ops.name_scope('pack'):
for g, v in to_pack:
variables.append(v)
restore_shapes.append(g.shape)
with ops.device(g.device):
members.append(array_ops.reshape(g, [-1]))
packing[key] = GradPackTuple(
indices=range(rng[0], rng[1] + 1),
vars=variables,
shapes=restore_shapes)
with ops.device(members[0].device):
return array_ops.concat(members, 0)
def unpack_grad_tuple(gv, gpt):
"""Unpack a previously packed collection of gradient tensors.
Args:
gv: A (grad, var) pair to be unpacked.
gpt: A GradPackTuple describing the packing operation that produced gv.
Returns:
A list of (grad, var) pairs corresponding to the values that were
originally packed into gv, maybe following subsequent operations like
reduction.
"""
elt_widths = [x.num_elements() for x in gpt.shapes]
with ops.device(gv[0][0].device):
with ops.name_scope('unpack'):
splits = array_ops.split(gv[0], elt_widths)
unpacked_gv = []
for idx, s in enumerate(splits):
unpacked_gv.append((array_ops.reshape(s, gpt.shapes[idx]),
gpt.vars[idx]))
return unpacked_gv
def pack_small_tensors(replica_grads, max_bytes=0, max_group=0):
"""Concatenate small gradient tensors together for reduction.
Args:
replica_grads: List of lists of (gradient, variable) tuples.
max_bytes: Int giving max number of bytes in a tensor that
may be considered small.
max_group: Int giving max number of small tensors that may be
concatenated into one new tensor.
Returns:
new_replica_grads, packing where new_replica_grads is identical to
replica_grads except that all feasible small_tensors have been removed
from their places and concatenated into larger tensors that are
now in the front of the list for each replica, and packing contains
the data necessary to restore the replica_grads structure.
Look through the first replica for gradients of the same type (float),
and small size, that are all sequential. For each such group,
replace by a new tensor that is a flattened concatenation. Note
that the corresponding variable will be absent, which doesn't matter
because it isn't used during all-reduce.
Requires:
Every gv_list in replicas must have isomorphic structure including identical
tensor sizes and types.
"""
small_indices = []
large_indices = []
for idx, (g, _) in enumerate(replica_grads[0]):
if g.dtype == dtypes.float32 and (4 * g.shape.num_elements()) <= max_bytes:
small_indices.append(idx)
else:
large_indices.append(idx)
small_ranges, small_singles = extract_ranges(
small_indices, range_size_limit=max_group)
large_indices = sorted(large_indices + small_singles)
num_gv = len(replica_grads[0])
packing = {}
if small_ranges:
new_replica_grads = []
for dev_idx, gv_list in enumerate(replica_grads):
assert len(gv_list) == num_gv
new_gv_list = []
for r in small_ranges:
key = '%d:%d' % (dev_idx, len(new_gv_list))
new_gv_list.append((pack_range(key, packing, gv_list, r),
'packing_var_placeholder'))
for i in large_indices:
new_gv_list.append(gv_list[i])
new_replica_grads.append(new_gv_list)
return new_replica_grads, packing
else:
return replica_grads, None
def unpack_small_tensors(replica_grads, packing):
"""Undo the structure alterations to replica_grads done by pack_small_tensors.
Args:
replica_grads: List of List of (grad, var) tuples.
packing: A dict generated by pack_small_tensors describing the changes
it made to replica_grads.
Returns:
new_replica_grads: identical to replica_grads except that concatenations
of small tensors have been split apart and returned to their original
positions, paired with their original variables.
"""
if not packing:
return replica_grads
new_replica_grads = []
num_devices = len(replica_grads)
num_packed = len(packing.keys()) // num_devices
for dev_idx, gv_list in enumerate(replica_grads):
gv_list = list(gv_list)
new_gv_list = gv_list[num_packed:]
for i in range(num_packed):
k = '%d:%d' % (dev_idx, i)
gpt = packing[k]
gv = unpack_grad_tuple(gv_list[i], gpt)
for gi, idx in enumerate(gpt.indices):
assert idx == gpt.indices[gi]
new_gv_list.insert(idx, gv[gi])
new_replica_grads.append(new_gv_list)
return new_replica_grads
def aggregate_tensors_or_indexed_slices(values, accumulation_fn=math_ops.add_n):
"""Aggregate tensors using `accumulation_fn` and IndexedSlices via concat."""
if any(isinstance(v, ops.IndexedSlices) for v in values):
return gradients_impl._AggregateIndexedSlicesGradients(values) # pylint: disable=protected-access
else:
return accumulation_fn(values)
def divide_by_n_tensors_or_indexed_slices(value, n):
if isinstance(value, ops.IndexedSlices):
value = gradients_impl._HandleNestedIndexedSlices(value) # pylint: disable=protected-access
return ops.IndexedSlices(
value.values / n, value.indices, value.dense_shape)
else:
return value / n
def copy_tensor_or_indexed_slices_to_device(value, device):
with ops.device(device):
if isinstance(value, ops.IndexedSlices):
copied_values = array_ops.identity(value.values)
copied_indices = array_ops.identity(value.indices)
copied_shape = array_ops.identity(value.dense_shape)
result = ops.IndexedSlices(copied_values, copied_indices, copied_shape)
else:
result = array_ops.identity(value)
return result
def contains_indexed_slices(value):
"""Check whether the value is `IndexedSlices` or contains `IndexedSlices`."""
if isinstance(value, ops.IndexedSlices):
return True
elif isinstance(value, (list, tuple)) and value:
return any(contains_indexed_slices(v) for v in value)
elif isinstance(value, value_lib.DistributedValues):
return contains_indexed_slices(list(value._index.values())) # pylint: disable=protected-access
else:
return False
|
sum_gradients_all_reduce
|
Apply all-reduce algorithm over specified gradient tensors.
Args:
dev_prefixes: list of prefix strings to use to generate PS device names.
replica_grads: the gradients to reduce.
num_workers: number of worker processes across entire job.
alg: the all-reduce algorithm to apply.
num_shards: alg-specific sharding factor.
gpu_indices: indices of local GPUs in order usable for ring-reduce.
Returns:
list of reduced tensors
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for cross_device_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as pycoll
import threading
from tensorflow.python.distribute import all_reduce
from tensorflow.python.distribute import values as value_lib
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nccl_ops
def aggregate_gradients_using_nccl(replica_grads):
"""Aggregate gradients using nccl allreduce."""
agg_all_g_and_v = []
for single_g_and_v in zip(*replica_grads):
single_grads = [g for g, _ in single_g_and_v]
agg_grads = nccl_ops.all_sum(single_grads)
agg_all_g_and_v.append(
[(g, v) for g, (_, v) in zip(agg_grads, single_g_and_v)])
agg_all_g_and_v = list(zip(*agg_all_g_and_v))
return agg_all_g_and_v
def aggregate_gradients_using_hierarchical_copy(avail_devices, replica_grads):
"""Aggregate gradients using hierarchical copies.
Args:
avail_devices: available GPU devices.
replica_grads: List of lists of (gradient, variable) tuples. The outer list
is over replicas. The inner list is over individual gradients.
Returns:
The list of (aggregated_gradient, variable), where the gradient has been
summed across all replicas and the variable is chosen from the first
replica.
"""
# This only works for DGX-1 type of machine topology
# Device peer to peer matrix
# DMA: 0 1 2 3 4 5 6 7
# 0: Y Y Y Y Y N N N
# 1: Y Y Y Y N Y N N
# 2: Y Y Y Y N N Y N
# 3: Y Y Y Y N N N Y
# 4: Y N N N Y Y Y Y
# 5: N Y N N Y Y Y Y
# 6: N N Y N Y Y Y Y
# 7: N N N Y Y Y Y Y
agg_grads = []
num_devices = len(avail_devices)
# In the special case of DGX-1 machine topology, the two groups have equal
# size.
group_size = num_devices // 2
for i, single_grads in enumerate(zip(*replica_grads)):
group_0_main_device = i % num_devices
group_1_main_device = (group_0_main_device + group_size) % num_devices
if group_0_main_device < group_size:
group_0_begin = 0
group_1_begin = group_size
else:
group_0_begin = group_size
group_1_begin = 0
# Aggregate the first group.
group_0_device_grads = single_grads[group_0_begin:
group_0_begin + group_size]
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads, _ = aggregate_single_gradient_using_copy(
group_0_device_grads, False, False)
# Aggregate the second group.
group_1_device_grads = single_grads[group_1_begin:
group_1_begin + group_size]
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads, _ = aggregate_single_gradient_using_copy(
group_1_device_grads, False, False)
# Aggregate between the groups.
with ops.device(avail_devices[group_0_main_device]):
(agg_total_grads, _), _ = aggregate_single_gradient_using_copy(
[group_0_agg_grads, group_1_agg_grads], False, False)
# Broadcast the result back into the root of each group.
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads_bcast = array_ops.identity(agg_total_grads)
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads_bcast = array_ops.identity(agg_total_grads)
agg_grads_bcast = []
for j in range(len(single_grads)):
with ops.device(avail_devices[j]):
# Broadcast the result back to each member in the group from the root.
if (group_0_main_device < group_size) == (j < group_size):
src_device_grad = group_0_agg_grads_bcast
else:
src_device_grad = group_1_agg_grads_bcast
agg_grads_bcast.append(array_ops.identity(src_device_grad))
agg_grads.append(
[(g, v) for g, (_, v) in zip(agg_grads_bcast, single_grads)])
agg_grads = list(zip(*agg_grads))
return agg_grads
def aggregate_single_gradient_using_copy(grad_and_vars, use_mean,
check_inf_nan):
"""Calculate the average gradient for a shared variable across all replicas.
Note that this function provides a synchronization point across all replicas.
Args:
grad_and_vars: A list or tuple of (gradient, variable) tuples. Each
(gradient, variable) pair within the outer list represents the gradient
of the variable calculated for a single replica, and the number of pairs
equals the number of replicas.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all replicas. The variable is chosen
    from the first replica. The has_nan_or_inf indicates whether any of the
    gradients contains a nan or inf.
"""
grads = [g for g, _ in grad_and_vars]
grad = math_ops.add_n(grads)
if use_mean and len(grads) > 1:
grad = array_ops.multiply(grad, 1.0 / len(grads))
v = grad_and_vars[0][1]
if check_inf_nan:
has_nan_or_inf = array_ops.logical_not(
array_ops.reduce_all(array_ops.is_finite(grads)))
return (grad, v), has_nan_or_inf
else:
return (grad, v), None
def group_device_names(devices, group_size):
"""Group device names into groups of group_size.
Args:
devices: a list of canonical device strings.
group_size: integer which is equal to or greater than 1.
Returns:
list of lists of devices, where each inner list is group_size long,
and each device appears at least once in an inner list. If
len(devices) % group_size == 0 then each device will appear exactly once.
Raises:
ValueError: if group_size > len(devices)
"""
num_devices = len(devices)
if group_size > num_devices:
raise ValueError(
'only %d devices, but group_size=%d' % (num_devices, group_size))
num_groups = (
num_devices // group_size + (1 if (num_devices % group_size != 0) else 0))
groups = [[] for i in range(num_groups)]
for i in range(num_groups * group_size):
groups[i % num_groups].append(devices[i % num_devices])
return groups
def split_grads_by_size(threshold_size, device_grads):
"""Break gradients into two sets according to tensor size.
Args:
threshold_size: int size cutoff for small vs large tensor.
device_grads: List of lists of (gradient, variable) tuples. The outer
list is over devices. The inner list is over individual gradients.
Returns:
small_grads: Subset of device_grads where shape is <= threshold_size
elements.
large_grads: Subset of device_grads where shape is > threshold_size
elements.
"""
small_grads = []
large_grads = []
for dl in device_grads:
small_dl = []
large_dl = []
for (g, v) in dl:
tensor_size = g.get_shape().num_elements()
if tensor_size <= threshold_size:
small_dl.append([g, v])
else:
large_dl.append([g, v])
if small_dl:
small_grads.append(small_dl)
if large_dl:
large_grads.append(large_dl)
return small_grads, large_grads
# threading.Lock() and threading.local() cannot be pickled and therefore cannot
# be a field of CollectiveKeys. Right now _thread_local is not necessary to be
# an instance member of CollectiveKeys since we always create a new thread for
# each replica.
_lock = threading.Lock()
_thread_local = threading.local()
# TODO(yuefengz): use random key starts to avoid reusing keys?
class CollectiveKeys(object):
"""Class that manages collective keys.
  We need to manage three different keys for collective:
  *Group key*: an integer key to identify the set of cooperative devices.
  Collective ops running on the same set of devices must use the same group
  key.
  *Instance key*: an integer key to identify the set of corresponding tensors
  on different devices in a device group that need to be all-reduced.
  *Graph key*: an integer key that is unique per graph. This is used to support
  multiple graphs per client session. It must be non-zero and set in the
  `config` argument of each call to `session.run`.
"""
def __init__(self,
group_key_start=1,
instance_key_start=100,
instance_key_with_id_start=10000):
"""Initializes the object.
Args:
group_key_start: the starting integer of group key.
instance_key_start: the starting integer of instance key.
instance_key_with_id_start: the starting integer of instance key that is
recorded with an id.
"""
self._group_key = group_key_start
self._group_key_table = dict()
# For instance keys with ids
self._instance_key_id_to_key_table = dict()
self._instance_key_with_id_counter = instance_key_with_id_start
# For instance keys without ids
self._instance_key_start = instance_key_start
def _get_thread_local_object(self):
# We make instance key without key ids thread local so that it will work
# with MirroredStrategy and distribute coordinator.
if not hasattr(_thread_local, 'instance_key'):
_thread_local.instance_key = self._instance_key_start
return _thread_local
def get_group_key(self, devices):
"""Returns a group key for the set of devices.
Args:
devices: list of strings naming devices in a collective group.
Returns:
int key uniquely identifying the set of device names.
"""
parsed = [pydev.DeviceSpec.from_string(d) for d in devices]
# In the between-graph replicated training, different workers need to get
# the same device key. So we remove the task_type and task_id from the
# devices.
# TODO(yuefengz): in the in-graph replicated training, we need to include
# task_type and task_id.
names = sorted(['%s:%d' % (d.device_type, d.device_index) for d in parsed])
key_id = ','.join(names)
with _lock:
if key_id not in self._group_key_table:
new_key = self._group_key
self._group_key += 1
self._group_key_table[key_id] = new_key
return self._group_key_table[key_id]
def get_instance_key(self, key_id=None):
"""Returns a new instance key for use in defining a collective op.
Args:
key_id: optional string. If set, key will be recorded and the same key
will be returned when the same key_id is provided. If not, an increasing
instance key will be returned.
"""
if key_id:
with _lock:
if key_id not in self._instance_key_id_to_key_table:
self._instance_key_with_id_counter += 1
self._instance_key_id_to_key_table[key_id] = (
self._instance_key_with_id_counter)
return self._instance_key_id_to_key_table[key_id]
else:
v = self._get_thread_local_object().instance_key
self._get_thread_local_object().instance_key += 1
return v
def build_collective_reduce(input_tensors,
num_workers,
collective_keys,
reduction_op='Add',
unary_op='Id'):
"""Build a subgraph that does one full all-reduce, using the collective Op.
Args:
input_tensors: tensors within a single worker graph that are to be reduced
together; must be one per device.
num_workers: total number of workers with identical independent graphs that
will be doing this same reduction. The reduction will actually include
the corresponding tensors at all these workers.
collective_keys: a CollectiveKeys object.
reduction_op: string naming the reduction op.
unary_op: string naming the unary final op.
Returns:
An array of final tensors, one per device, computed by the full reduction.
Raises:
ValueError: There must be at least two tensors over all the workers.
"""
group_size = len(input_tensors) * num_workers
if group_size < 2:
raise ValueError('num_workers * len(input_tensors) must be 2 or greater')
devices = [t.device for t in input_tensors]
num_devices = len(devices)
group_key = collective_keys.get_group_key(devices)
instance_key = collective_keys.get_instance_key()
out_tensors = []
subdiv_offsets = [0] # TODO(tucker): maybe support non-default subdiv spec
for d in range(num_devices):
with ops.device(devices[d]):
reduce_op = collective_ops.all_reduce(
input_tensors[d], group_size, group_key, instance_key, reduction_op,
unary_op, subdiv_offsets)
out_tensors.append(reduce_op)
return out_tensors
def sum_grad_and_var_all_reduce(grad_and_vars,
num_workers,
alg,
gpu_indices,
aux_devices=None,
num_shards=1):
"""Apply all-reduce algorithm over specified gradient tensors."""
with ops.name_scope('allreduce'):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
scaled_grads = [g for g, _ in grad_and_vars]
if alg == 'nccl':
summed_grads = nccl_ops.all_sum(scaled_grads)
elif alg == 'xring':
summed_grads = all_reduce.build_ring_all_reduce(
scaled_grads, num_workers, num_shards, gpu_indices, math_ops.add)
elif alg == 'nccl/xring':
summed_grads = all_reduce.build_nccl_then_ring(scaled_grads, num_shards,
math_ops.add)
elif alg == 'nccl/rechd':
summed_grads = all_reduce.build_nccl_then_recursive_hd(
scaled_grads, math_ops.add)
elif alg == 'nccl/pscpu':
summed_grads = all_reduce.build_nccl_then_shuffle(
scaled_grads, aux_devices, math_ops.add, math_ops.add_n)
elif alg == 'pscpu/pscpu':
second_gather_devices = aux_devices[:num_shards]
summed_grads = all_reduce.build_shuffle_then_shuffle(
scaled_grads, aux_devices, second_gather_devices, math_ops.add_n)
elif alg in ['pscpu', 'psgpu']:
summed_grads = all_reduce.build_shuffle_all_reduce(
scaled_grads, aux_devices, math_ops.add_n)
else:
      raise ValueError('unsupported all_reduce alg: %s' % alg)
result = []
for (_, v), g in zip(grad_and_vars, summed_grads):
result.append([g, v])
return result
# MASKED: sum_gradients_all_reduce function (lines 408-447)
def extract_ranges(index_list, range_size_limit=32):
"""Extract consecutive ranges and singles from index_list.
Args:
index_list: List of monotone increasing non-negative integers.
range_size_limit: Largest size range to return. If a larger
consecutive range exists, it will be returned as multiple
ranges.
Returns:
(ranges, singles) where ranges is a list of [first, last] pairs of
consecutive elements in index_list, and singles is all of the
other elements, in original order.
"""
if not index_list:
return [], []
first = index_list[0]
last = first
ranges = []
singles = []
for i in index_list[1:]:
if i == last + 1 and (last - first) <= range_size_limit:
last = i
else:
if last > first:
ranges.append([first, last])
else:
singles.append(first)
first = i
last = i
if last > first:
ranges.append([first, last])
else:
singles.append(first)
return ranges, singles
GradPackTuple = pycoll.namedtuple('GradPackTuple', 'indices vars shapes')
def pack_range(key, packing, grad_vars, rng):
"""Form the concatenation of a specified range of gradient tensors.
Args:
key: Value under which to store meta-data in packing that will be used
later to restore the grad_var list structure.
packing: Dict holding data describing packed ranges of small tensors.
grad_vars: List of (grad, var) pairs for one replica.
rng: A pair of integers giving the first, last indices of a consecutive
range of tensors to be packed.
Returns:
A tensor that is the concatenation of all the specified small tensors.
"""
to_pack = grad_vars[rng[0]:rng[1] + 1]
members = []
variables = []
restore_shapes = []
with ops.name_scope('pack'):
for g, v in to_pack:
variables.append(v)
restore_shapes.append(g.shape)
with ops.device(g.device):
members.append(array_ops.reshape(g, [-1]))
packing[key] = GradPackTuple(
indices=range(rng[0], rng[1] + 1),
vars=variables,
shapes=restore_shapes)
with ops.device(members[0].device):
return array_ops.concat(members, 0)
def unpack_grad_tuple(gv, gpt):
"""Unpack a previously packed collection of gradient tensors.
Args:
gv: A (grad, var) pair to be unpacked.
gpt: A GradPackTuple describing the packing operation that produced gv.
Returns:
A list of (grad, var) pairs corresponding to the values that were
originally packed into gv, maybe following subsequent operations like
reduction.
"""
elt_widths = [x.num_elements() for x in gpt.shapes]
with ops.device(gv[0][0].device):
with ops.name_scope('unpack'):
splits = array_ops.split(gv[0], elt_widths)
unpacked_gv = []
for idx, s in enumerate(splits):
unpacked_gv.append((array_ops.reshape(s, gpt.shapes[idx]),
gpt.vars[idx]))
return unpacked_gv
def pack_small_tensors(replica_grads, max_bytes=0, max_group=0):
"""Concatenate small gradient tensors together for reduction.
Args:
replica_grads: List of lists of (gradient, variable) tuples.
max_bytes: Int giving max number of bytes in a tensor that
may be considered small.
max_group: Int giving max number of small tensors that may be
concatenated into one new tensor.
Returns:
new_replica_grads, packing where new_replica_grads is identical to
replica_grads except that all feasible small_tensors have been removed
from their places and concatenated into larger tensors that are
now in the front of the list for each replica, and packing contains
the data necessary to restore the replica_grads structure.
Look through the first replica for gradients of the same type (float),
and small size, that are all sequential. For each such group,
replace by a new tensor that is a flattened concatenation. Note
that the corresponding variable will be absent, which doesn't matter
because it isn't used during all-reduce.
Requires:
Every gv_list in replicas must have isomorphic structure including identical
tensor sizes and types.
"""
small_indices = []
large_indices = []
for idx, (g, _) in enumerate(replica_grads[0]):
if g.dtype == dtypes.float32 and (4 * g.shape.num_elements()) <= max_bytes:
small_indices.append(idx)
else:
large_indices.append(idx)
small_ranges, small_singles = extract_ranges(
small_indices, range_size_limit=max_group)
large_indices = sorted(large_indices + small_singles)
num_gv = len(replica_grads[0])
packing = {}
if small_ranges:
new_replica_grads = []
for dev_idx, gv_list in enumerate(replica_grads):
assert len(gv_list) == num_gv
new_gv_list = []
for r in small_ranges:
key = '%d:%d' % (dev_idx, len(new_gv_list))
new_gv_list.append((pack_range(key, packing, gv_list, r),
'packing_var_placeholder'))
for i in large_indices:
new_gv_list.append(gv_list[i])
new_replica_grads.append(new_gv_list)
return new_replica_grads, packing
else:
return replica_grads, None
def unpack_small_tensors(replica_grads, packing):
"""Undo the structure alterations to replica_grads done by pack_small_tensors.
Args:
replica_grads: List of List of (grad, var) tuples.
packing: A dict generated by pack_small_tensors describing the changes
it made to replica_grads.
Returns:
new_replica_grads: identical to replica_grads except that concatenations
of small tensors have been split apart and returned to their original
positions, paired with their original variables.
"""
if not packing:
return replica_grads
new_replica_grads = []
num_devices = len(replica_grads)
num_packed = len(packing.keys()) // num_devices
for dev_idx, gv_list in enumerate(replica_grads):
gv_list = list(gv_list)
new_gv_list = gv_list[num_packed:]
for i in range(num_packed):
k = '%d:%d' % (dev_idx, i)
gpt = packing[k]
gv = unpack_grad_tuple(gv_list[i], gpt)
for gi, idx in enumerate(gpt.indices):
assert idx == gpt.indices[gi]
new_gv_list.insert(idx, gv[gi])
new_replica_grads.append(new_gv_list)
return new_replica_grads
def aggregate_tensors_or_indexed_slices(values, accumulation_fn=math_ops.add_n):
"""Aggregate tensors using `accumulation_fn` and IndexedSlices via concat."""
if any(isinstance(v, ops.IndexedSlices) for v in values):
return gradients_impl._AggregateIndexedSlicesGradients(values) # pylint: disable=protected-access
else:
return accumulation_fn(values)
def divide_by_n_tensors_or_indexed_slices(value, n):
if isinstance(value, ops.IndexedSlices):
value = gradients_impl._HandleNestedIndexedSlices(value) # pylint: disable=protected-access
return ops.IndexedSlices(
value.values / n, value.indices, value.dense_shape)
else:
return value / n
def copy_tensor_or_indexed_slices_to_device(value, device):
with ops.device(device):
if isinstance(value, ops.IndexedSlices):
copied_values = array_ops.identity(value.values)
copied_indices = array_ops.identity(value.indices)
copied_shape = array_ops.identity(value.dense_shape)
result = ops.IndexedSlices(copied_values, copied_indices, copied_shape)
else:
result = array_ops.identity(value)
return result
def contains_indexed_slices(value):
"""Check whether the value is `IndexedSlices` or contains `IndexedSlices`."""
if isinstance(value, ops.IndexedSlices):
return True
elif isinstance(value, (list, tuple)) and value:
return any(contains_indexed_slices(v) for v in value)
elif isinstance(value, value_lib.DistributedValues):
return contains_indexed_slices(list(value._index.values())) # pylint: disable=protected-access
else:
return False
|
def sum_gradients_all_reduce(dev_prefixes, replica_grads, num_workers, alg,
num_shards, gpu_indices):
"""Apply all-reduce algorithm over specified gradient tensors.
Args:
dev_prefixes: list of prefix strings to use to generate PS device names.
replica_grads: the gradients to reduce.
num_workers: number of worker processes across entire job.
alg: the all-reduce algorithm to apply.
num_shards: alg-specific sharding factor.
gpu_indices: indices of local GPUs in order usable for ring-reduce.
Returns:
list of reduced tensors
"""
alg_contains_shuffle = any([n in alg for n in ['pscpu', 'psgpu']])
is_hierarchical = '/' in alg
if 'pscpu' in alg:
aux_devices = [prefix + '/cpu:0' for prefix in dev_prefixes]
elif 'psgpu' in alg:
aux_devices = [
prefix + '/gpu:%d' % i
for i in range(len(gpu_indices))
for prefix in dev_prefixes
]
else:
aux_devices = ['/job:localhost/cpu:0']
# Auxiliary devices for hierarchical all-reduces.
aux_device_groups = group_device_names(
aux_devices, num_shards if alg_contains_shuffle else 1)
group_index = 0
reduced_gv_list = []
for grad_and_vars in zip(*replica_grads):
reduced_gv_list.append(
sum_grad_and_var_all_reduce(
grad_and_vars, num_workers, alg, gpu_indices, aux_devices
if is_hierarchical else aux_device_groups[group_index], num_shards))
group_index = (group_index + 1) % len(aux_device_groups)
new_replica_grads = [list(x) for x in zip(*reduced_gv_list)]
return new_replica_grads
| 408 | 447 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for cross_device_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as pycoll
import threading
from tensorflow.python.distribute import all_reduce
from tensorflow.python.distribute import values as value_lib
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nccl_ops
def aggregate_gradients_using_nccl(replica_grads):
"""Aggregate gradients using nccl allreduce."""
agg_all_g_and_v = []
for single_g_and_v in zip(*replica_grads):
single_grads = [g for g, _ in single_g_and_v]
agg_grads = nccl_ops.all_sum(single_grads)
agg_all_g_and_v.append(
[(g, v) for g, (_, v) in zip(agg_grads, single_g_and_v)])
agg_all_g_and_v = list(zip(*agg_all_g_and_v))
return agg_all_g_and_v
def aggregate_gradients_using_hierarchical_copy(avail_devices, replica_grads):
"""Aggregate gradients using hierarchical copies.
Args:
avail_devices: available GPU devices.
replica_grads: List of lists of (gradient, variable) tuples. The outer list
is over replicas. The inner list is over individual gradients.
Returns:
The list of (aggregated_gradient, variable), where the gradient has been
summed across all replicas and the variable is chosen from the first
replica.
"""
# This only works for DGX-1 type of machine topology
# Device peer to peer matrix
# DMA: 0 1 2 3 4 5 6 7
# 0: Y Y Y Y Y N N N
# 1: Y Y Y Y N Y N N
# 2: Y Y Y Y N N Y N
# 3: Y Y Y Y N N N Y
# 4: Y N N N Y Y Y Y
# 5: N Y N N Y Y Y Y
# 6: N N Y N Y Y Y Y
# 7: N N N Y Y Y Y Y
agg_grads = []
num_devices = len(avail_devices)
# In the special case of DGX-1 machine topology, the two groups have equal
# size.
group_size = num_devices // 2
for i, single_grads in enumerate(zip(*replica_grads)):
group_0_main_device = i % num_devices
group_1_main_device = (group_0_main_device + group_size) % num_devices
if group_0_main_device < group_size:
group_0_begin = 0
group_1_begin = group_size
else:
group_0_begin = group_size
group_1_begin = 0
# Aggregate the first group.
group_0_device_grads = single_grads[group_0_begin:
group_0_begin + group_size]
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads, _ = aggregate_single_gradient_using_copy(
group_0_device_grads, False, False)
# Aggregate the second group.
group_1_device_grads = single_grads[group_1_begin:
group_1_begin + group_size]
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads, _ = aggregate_single_gradient_using_copy(
group_1_device_grads, False, False)
# Aggregate between the groups.
with ops.device(avail_devices[group_0_main_device]):
(agg_total_grads, _), _ = aggregate_single_gradient_using_copy(
[group_0_agg_grads, group_1_agg_grads], False, False)
# Broadcast the result back into the root of each group.
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads_bcast = array_ops.identity(agg_total_grads)
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads_bcast = array_ops.identity(agg_total_grads)
agg_grads_bcast = []
for j in range(len(single_grads)):
with ops.device(avail_devices[j]):
# Broadcast the result back to each member in the group from the root.
if (group_0_main_device < group_size) == (j < group_size):
src_device_grad = group_0_agg_grads_bcast
else:
src_device_grad = group_1_agg_grads_bcast
agg_grads_bcast.append(array_ops.identity(src_device_grad))
agg_grads.append(
[(g, v) for g, (_, v) in zip(agg_grads_bcast, single_grads)])
agg_grads = list(zip(*agg_grads))
return agg_grads
def aggregate_single_gradient_using_copy(grad_and_vars, use_mean,
check_inf_nan):
"""Calculate the average gradient for a shared variable across all replicas.
Note that this function provides a synchronization point across all replicas.
Args:
grad_and_vars: A list or tuple of (gradient, variable) tuples. Each
(gradient, variable) pair within the outer list represents the gradient
of the variable calculated for a single replica, and the number of pairs
equals the number of replicas.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all replicas. The variable is chosen
from the first replica. has_nan_or_inf indicates whether any gradient contains
a NaN or Inf value.
"""
grads = [g for g, _ in grad_and_vars]
grad = math_ops.add_n(grads)
if use_mean and len(grads) > 1:
grad = array_ops.multiply(grad, 1.0 / len(grads))
v = grad_and_vars[0][1]
if check_inf_nan:
has_nan_or_inf = math_ops.logical_not(
math_ops.reduce_all(math_ops.is_finite(grads)))
return (grad, v), has_nan_or_inf
else:
return (grad, v), None
def group_device_names(devices, group_size):
"""Group device names into groups of group_size.
Args:
devices: a list of canonical device strings.
group_size: integer which is equal to or greater than 1.
Returns:
list of lists of devices, where each inner list is group_size long,
and each device appears at least once in an inner list. If
len(devices) % group_size == 0 then each device will appear exactly once.
Raises:
ValueError: if group_size > len(devices)
"""
num_devices = len(devices)
if group_size > num_devices:
raise ValueError(
'only %d devices, but group_size=%d' % (num_devices, group_size))
num_groups = (
num_devices // group_size + (1 if (num_devices % group_size != 0) else 0))
groups = [[] for i in range(num_groups)]
for i in range(num_groups * group_size):
groups[i % num_groups].append(devices[i % num_devices])
return groups
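# Illustrative example (not part of the original module): with three devices
# and group_size=2 the devices wrap around so each appears at least once:
#   group_device_names(['/gpu:0', '/gpu:1', '/gpu:2'], 2)
#   == [['/gpu:0', '/gpu:2'], ['/gpu:1', '/gpu:0']]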
def split_grads_by_size(threshold_size, device_grads):
"""Break gradients into two sets according to tensor size.
Args:
threshold_size: int size cutoff for small vs large tensor.
device_grads: List of lists of (gradient, variable) tuples. The outer
list is over devices. The inner list is over individual gradients.
Returns:
small_grads: Subset of device_grads where shape is <= threshold_size
elements.
large_grads: Subset of device_grads where shape is > threshold_size
elements.
"""
small_grads = []
large_grads = []
for dl in device_grads:
small_dl = []
large_dl = []
for (g, v) in dl:
tensor_size = g.get_shape().num_elements()
if tensor_size <= threshold_size:
small_dl.append([g, v])
else:
large_dl.append([g, v])
if small_dl:
small_grads.append(small_dl)
if large_dl:
large_grads.append(large_dl)
return small_grads, large_grads
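# Illustrative note (not part of the original module): the split is by element
# count, not bytes. For example, with threshold_size=1024 a [32, 32] gradient
# (1024 elements) lands in small_grads while a [64, 64] gradient (4096
# elements) lands in large_grads.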
# threading.Lock() and threading.local() cannot be pickled and therefore cannot
# be a field of CollectiveKeys. Right now _thread_local does not need to be
# an instance member of CollectiveKeys since we always create a new thread for
# each replica.
_lock = threading.Lock()
_thread_local = threading.local()
# TODO(yuefengz): use random key starts to avoid reusing keys?
class CollectiveKeys(object):
"""Class that manages collective keys.
We need to manage three different keys for collective ops:
*Group key*: an integer key to identify the set of cooperative devices.
Collective ops that work under the same set of devices must use the same
group key.
*Instance key*: an integer key to identify the set of corresponding tensors
on different devices in a device group that need to be all-reduced.
*Graph key*: an integer key that is unique per graph. This is used to support
multiple graphs per client session. It must be non-zero and set in the
`config` argument of each call to `session.run`.
"""
def __init__(self,
group_key_start=1,
instance_key_start=100,
instance_key_with_id_start=10000):
"""Initializes the object.
Args:
group_key_start: the starting integer of group key.
instance_key_start: the starting integer of instance key.
instance_key_with_id_start: the starting integer of instance key that is
recorded with an id.
"""
self._group_key = group_key_start
self._group_key_table = dict()
# For instance keys with ids
self._instance_key_id_to_key_table = dict()
self._instance_key_with_id_counter = instance_key_with_id_start
# For instance keys without ids
self._instance_key_start = instance_key_start
def _get_thread_local_object(self):
# We make instance key without key ids thread local so that it will work
# with MirroredStrategy and distribute coordinator.
if not hasattr(_thread_local, 'instance_key'):
_thread_local.instance_key = self._instance_key_start
return _thread_local
def get_group_key(self, devices):
"""Returns a group key for the set of devices.
Args:
devices: list of strings naming devices in a collective group.
Returns:
int key uniquely identifying the set of device names.
"""
parsed = [pydev.DeviceSpec.from_string(d) for d in devices]
# In the between-graph replicated training, different workers need to get
# the same device key. So we remove the task_type and task_id from the
# devices.
# TODO(yuefengz): in the in-graph replicated training, we need to include
# task_type and task_id.
names = sorted(['%s:%d' % (d.device_type, d.device_index) for d in parsed])
key_id = ','.join(names)
with _lock:
if key_id not in self._group_key_table:
new_key = self._group_key
self._group_key += 1
self._group_key_table[key_id] = new_key
return self._group_key_table[key_id]
def get_instance_key(self, key_id=None):
"""Returns a new instance key for use in defining a collective op.
Args:
key_id: optional string. If set, key will be recorded and the same key
will be returned when the same key_id is provided. If not, an increasing
instance key will be returned.
"""
if key_id:
with _lock:
if key_id not in self._instance_key_id_to_key_table:
self._instance_key_with_id_counter += 1
self._instance_key_id_to_key_table[key_id] = (
self._instance_key_with_id_counter)
return self._instance_key_id_to_key_table[key_id]
else:
v = self._get_thread_local_object().instance_key
self._get_thread_local_object().instance_key += 1
return v
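# Illustrative usage sketch (not part of the original module; device strings
# are hypothetical):
#   keys = CollectiveKeys()
#   group_key = keys.get_group_key(['/job:worker/task:0/device:GPU:0',
#                                   '/job:worker/task:0/device:GPU:1'])
#   instance_key = keys.get_instance_key()
# The same set of device names always maps back to the same group key, while
# instance keys requested without a key_id increase monotonically per thread.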
def build_collective_reduce(input_tensors,
num_workers,
collective_keys,
reduction_op='Add',
unary_op='Id'):
"""Build a subgraph that does one full all-reduce, using the collective Op.
Args:
input_tensors: tensors within a single worker graph that are to be reduced
together; must be one per device.
num_workers: total number of workers with identical independent graphs that
will be doing this same reduction. The reduction will actually include
the corresponding tensors at all these workers.
collective_keys: a CollectiveKeys object.
reduction_op: string naming the reduction op.
unary_op: string naming the unary final op.
Returns:
An array of final tensors, one per device, computed by the full reduction.
Raises:
ValueError: There must be at least two tensors over all the workers.
"""
group_size = len(input_tensors) * num_workers
if group_size < 2:
raise ValueError('num_workers * len(input_tensors) must be 2 or greater')
devices = [t.device for t in input_tensors]
num_devices = len(devices)
group_key = collective_keys.get_group_key(devices)
instance_key = collective_keys.get_instance_key()
out_tensors = []
subdiv_offsets = [0] # TODO(tucker): maybe support non-default subdiv spec
for d in range(num_devices):
with ops.device(devices[d]):
reduce_op = collective_ops.all_reduce(
input_tensors[d], group_size, group_key, instance_key, reduction_op,
unary_op, subdiv_offsets)
out_tensors.append(reduce_op)
return out_tensors
def sum_grad_and_var_all_reduce(grad_and_vars,
num_workers,
alg,
gpu_indices,
aux_devices=None,
num_shards=1):
"""Apply all-reduce algorithm over specified gradient tensors."""
with ops.name_scope('allreduce'):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
scaled_grads = [g for g, _ in grad_and_vars]
if alg == 'nccl':
summed_grads = nccl_ops.all_sum(scaled_grads)
elif alg == 'xring':
summed_grads = all_reduce.build_ring_all_reduce(
scaled_grads, num_workers, num_shards, gpu_indices, math_ops.add)
elif alg == 'nccl/xring':
summed_grads = all_reduce.build_nccl_then_ring(scaled_grads, num_shards,
math_ops.add)
elif alg == 'nccl/rechd':
summed_grads = all_reduce.build_nccl_then_recursive_hd(
scaled_grads, math_ops.add)
elif alg == 'nccl/pscpu':
summed_grads = all_reduce.build_nccl_then_shuffle(
scaled_grads, aux_devices, math_ops.add, math_ops.add_n)
elif alg == 'pscpu/pscpu':
second_gather_devices = aux_devices[:num_shards]
summed_grads = all_reduce.build_shuffle_then_shuffle(
scaled_grads, aux_devices, second_gather_devices, math_ops.add_n)
elif alg in ['pscpu', 'psgpu']:
summed_grads = all_reduce.build_shuffle_all_reduce(
scaled_grads, aux_devices, math_ops.add_n)
else:
raise ValueError('unsupported all_reduce alg: %s' % alg)
result = []
for (_, v), g in zip(grad_and_vars, summed_grads):
result.append([g, v])
return result
def sum_gradients_all_reduce(dev_prefixes, replica_grads, num_workers, alg,
num_shards, gpu_indices):
"""Apply all-reduce algorithm over specified gradient tensors.
Args:
dev_prefixes: list of prefix strings to use to generate PS device names.
replica_grads: the gradients to reduce.
num_workers: number of worker processes across entire job.
alg: the all-reduce algorithm to apply.
num_shards: alg-specific sharding factor.
gpu_indices: indices of local GPUs in order usable for ring-reduce.
Returns:
list of lists of summed (gradient, variable) pairs, one inner list per replica.
"""
alg_contains_shuffle = any([n in alg for n in ['pscpu', 'psgpu']])
is_hierarchical = '/' in alg
if 'pscpu' in alg:
aux_devices = [prefix + '/cpu:0' for prefix in dev_prefixes]
elif 'psgpu' in alg:
aux_devices = [
prefix + '/gpu:%d' % i
for i in range(len(gpu_indices))
for prefix in dev_prefixes
]
else:
aux_devices = ['/job:localhost/cpu:0']
# Auxiliary devices for hierarchical all-reduces.
aux_device_groups = group_device_names(
aux_devices, num_shards if alg_contains_shuffle else 1)
group_index = 0
reduced_gv_list = []
for grad_and_vars in zip(*replica_grads):
reduced_gv_list.append(
sum_grad_and_var_all_reduce(
grad_and_vars, num_workers, alg, gpu_indices, aux_devices
if is_hierarchical else aux_device_groups[group_index], num_shards))
group_index = (group_index + 1) % len(aux_device_groups)
new_replica_grads = [list(x) for x in zip(*reduced_gv_list)]
return new_replica_grads
def extract_ranges(index_list, range_size_limit=32):
"""Extract consecutive ranges and singles from index_list.
Args:
index_list: List of monotone increasing non-negative integers.
range_size_limit: Largest size range to return. If a larger
consecutive range exists, it will be returned as multiple
ranges.
Returns:
(ranges, singles) where ranges is a list of [first, last] pairs of
consecutive elements in index_list, and singles is all of the
other elements, in original order.
"""
if not index_list:
return [], []
first = index_list[0]
last = first
ranges = []
singles = []
for i in index_list[1:]:
if i == last + 1 and (last - first) <= range_size_limit:
last = i
else:
if last > first:
ranges.append([first, last])
else:
singles.append(first)
first = i
last = i
if last > first:
ranges.append([first, last])
else:
singles.append(first)
return ranges, singles
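# Illustrative example (not part of the original module): consecutive runs
# become [first, last] pairs and isolated indices become singles:
#   extract_ranges([0, 1, 2, 5, 7, 8]) == ([[0, 2], [7, 8]], [5])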
GradPackTuple = pycoll.namedtuple('GradPackTuple', 'indices vars shapes')
def pack_range(key, packing, grad_vars, rng):
"""Form the concatenation of a specified range of gradient tensors.
Args:
key: Value under which to store meta-data in packing that will be used
later to restore the grad_var list structure.
packing: Dict holding data describing packed ranges of small tensors.
grad_vars: List of (grad, var) pairs for one replica.
rng: A pair of integers giving the first, last indices of a consecutive
range of tensors to be packed.
Returns:
A tensor that is the concatenation of all the specified small tensors.
"""
to_pack = grad_vars[rng[0]:rng[1] + 1]
members = []
variables = []
restore_shapes = []
with ops.name_scope('pack'):
for g, v in to_pack:
variables.append(v)
restore_shapes.append(g.shape)
with ops.device(g.device):
members.append(array_ops.reshape(g, [-1]))
packing[key] = GradPackTuple(
indices=range(rng[0], rng[1] + 1),
vars=variables,
shapes=restore_shapes)
with ops.device(members[0].device):
return array_ops.concat(members, 0)
def unpack_grad_tuple(gv, gpt):
"""Unpack a previously packed collection of gradient tensors.
Args:
gv: A (grad, var) pair to be unpacked.
gpt: A GradPackTuple describing the packing operation that produced gv.
Returns:
A list of (grad, var) pairs corresponding to the values that were
originally packed into gv, maybe following subsequent operations like
reduction.
"""
elt_widths = [x.num_elements() for x in gpt.shapes]
with ops.device(gv[0][0].device):
with ops.name_scope('unpack'):
splits = array_ops.split(gv[0], elt_widths)
unpacked_gv = []
for idx, s in enumerate(splits):
unpacked_gv.append((array_ops.reshape(s, gpt.shapes[idx]),
gpt.vars[idx]))
return unpacked_gv
def pack_small_tensors(replica_grads, max_bytes=0, max_group=0):
"""Concatenate small gradient tensors together for reduction.
Args:
replica_grads: List of lists of (gradient, variable) tuples.
max_bytes: Int giving max number of bytes in a tensor that
may be considered small.
max_group: Int giving max number of small tensors that may be
concatenated into one new tensor.
Returns:
new_replica_grads, packing where new_replica_grads is identical to
replica_grads except that all feasible small_tensors have been removed
from their places and concatenated into larger tensors that are
now in the front of the list for each replica, and packing contains
the data necessary to restore the replica_grads structure.
Look through the first replica for gradients of the same type (float),
and small size, that are all sequential. For each such group,
replace by a new tensor that is a flattened concatenation. Note
that the corresponding variable will be absent, which doesn't matter
because it isn't used during all-reduce.
Requires:
Every gv_list in replicas must have isomorphic structure including identical
tensor sizes and types.
"""
small_indices = []
large_indices = []
for idx, (g, _) in enumerate(replica_grads[0]):
if g.dtype == dtypes.float32 and (4 * g.shape.num_elements()) <= max_bytes:
small_indices.append(idx)
else:
large_indices.append(idx)
small_ranges, small_singles = extract_ranges(
small_indices, range_size_limit=max_group)
large_indices = sorted(large_indices + small_singles)
num_gv = len(replica_grads[0])
packing = {}
if small_ranges:
new_replica_grads = []
for dev_idx, gv_list in enumerate(replica_grads):
assert len(gv_list) == num_gv
new_gv_list = []
for r in small_ranges:
key = '%d:%d' % (dev_idx, len(new_gv_list))
new_gv_list.append((pack_range(key, packing, gv_list, r),
'packing_var_placeholder'))
for i in large_indices:
new_gv_list.append(gv_list[i])
new_replica_grads.append(new_gv_list)
return new_replica_grads, packing
else:
return replica_grads, None
def unpack_small_tensors(replica_grads, packing):
"""Undo the structure alterations to replica_grads done by pack_small_tensors.
Args:
replica_grads: List of List of (grad, var) tuples.
packing: A dict generated by pack_small_tensors describing the changes
it made to replica_grads.
Returns:
new_replica_grads: identical to replica_grads except that concatenations
of small tensors have been split apart and returned to their original
positions, paired with their original variables.
"""
if not packing:
return replica_grads
new_replica_grads = []
num_devices = len(replica_grads)
num_packed = len(packing.keys()) // num_devices
for dev_idx, gv_list in enumerate(replica_grads):
gv_list = list(gv_list)
new_gv_list = gv_list[num_packed:]
for i in range(num_packed):
k = '%d:%d' % (dev_idx, i)
gpt = packing[k]
gv = unpack_grad_tuple(gv_list[i], gpt)
for gi, idx in enumerate(gpt.indices):
assert idx == gpt.indices[gi]
new_gv_list.insert(idx, gv[gi])
new_replica_grads.append(new_gv_list)
return new_replica_grads
def aggregate_tensors_or_indexed_slices(values, accumulation_fn=math_ops.add_n):
"""Aggregate tensors using `accumulation_fn` and IndexedSlices via concat."""
if any(isinstance(v, ops.IndexedSlices) for v in values):
return gradients_impl._AggregateIndexedSlicesGradients(values) # pylint: disable=protected-access
else:
return accumulation_fn(values)
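# Illustrative note (not part of the original module): dense tensors are
# combined with accumulation_fn (math_ops.add_n by default), but if any value
# is an IndexedSlices the whole list goes through the sparse gradient path,
# which concatenates the slices instead of densifying them.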
def divide_by_n_tensors_or_indexed_slices(value, n):
if isinstance(value, ops.IndexedSlices):
value = gradients_impl._HandleNestedIndexedSlices(value) # pylint: disable=protected-access
return ops.IndexedSlices(
value.values / n, value.indices, value.dense_shape)
else:
return value / n
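# Illustrative note (not part of the original module): for IndexedSlices only
# the .values are divided by n; the indices and dense_shape are preserved, so
# the result is still a sparse gradient with the same dense shape.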
def copy_tensor_or_indexed_slices_to_device(value, device):
with ops.device(device):
if isinstance(value, ops.IndexedSlices):
copied_values = array_ops.identity(value.values)
copied_indices = array_ops.identity(value.indices)
copied_shape = array_ops.identity(value.dense_shape)
result = ops.IndexedSlices(copied_values, copied_indices, copied_shape)
else:
result = array_ops.identity(value)
return result
def contains_indexed_slices(value):
"""Check whether the value is `IndexedSlices` or contains `IndexedSlices`."""
if isinstance(value, ops.IndexedSlices):
return True
elif isinstance(value, (list, tuple)) and value:
return any(contains_indexed_slices(v) for v in value)
elif isinstance(value, value_lib.DistributedValues):
return contains_indexed_slices(list(value._index.values())) # pylint: disable=protected-access
else:
return False
|
pack_range
|
Form the concatenation of a specified range of gradient tensors.
Args:
key: Value under which to store meta-data in packing that will be used
later to restore the grad_var list structure.
packing: Dict holding data describing packed ranges of small tensors.
grad_vars: List of (grad, var) pairs for one replica.
rng: A pair of integers giving the first, last indices of a consecutive
range of tensors to be packed.
Returns:
A tensor that is the concatenation of all the specified small tensors.
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for cross_device_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as pycoll
import threading
from tensorflow.python.distribute import all_reduce
from tensorflow.python.distribute import values as value_lib
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nccl_ops
def aggregate_gradients_using_nccl(replica_grads):
"""Aggregate gradients using nccl allreduce."""
agg_all_g_and_v = []
for single_g_and_v in zip(*replica_grads):
single_grads = [g for g, _ in single_g_and_v]
agg_grads = nccl_ops.all_sum(single_grads)
agg_all_g_and_v.append(
[(g, v) for g, (_, v) in zip(agg_grads, single_g_and_v)])
agg_all_g_and_v = list(zip(*agg_all_g_and_v))
return agg_all_g_and_v
def aggregate_gradients_using_hierarchical_copy(avail_devices, replica_grads):
"""Aggregate gradients using hierarchical copies.
Args:
avail_devices: available GPU devices.
replica_grads: List of lists of (gradient, variable) tuples. The outer list
is over replicas. The inner list is over individual gradients.
Returns:
The list of (aggregated_gradient, variable), where the gradient has been
summed across all replicas and the variable is chosen from the first
replica.
"""
# This only works for DGX-1 type of machine topology
# Device peer to peer matrix
# DMA: 0 1 2 3 4 5 6 7
# 0: Y Y Y Y Y N N N
# 1: Y Y Y Y N Y N N
# 2: Y Y Y Y N N Y N
# 3: Y Y Y Y N N N Y
# 4: Y N N N Y Y Y Y
# 5: N Y N N Y Y Y Y
# 6: N N Y N Y Y Y Y
# 7: N N N Y Y Y Y Y
agg_grads = []
num_devices = len(avail_devices)
# In the special case of DGX-1 machine topology, the two groups have equal
# size.
group_size = num_devices // 2
for i, single_grads in enumerate(zip(*replica_grads)):
group_0_main_device = i % num_devices
group_1_main_device = (group_0_main_device + group_size) % num_devices
if group_0_main_device < group_size:
group_0_begin = 0
group_1_begin = group_size
else:
group_0_begin = group_size
group_1_begin = 0
# Aggregate the first group.
group_0_device_grads = single_grads[group_0_begin:
group_0_begin + group_size]
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads, _ = aggregate_single_gradient_using_copy(
group_0_device_grads, False, False)
# Aggregate the second group.
group_1_device_grads = single_grads[group_1_begin:
group_1_begin + group_size]
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads, _ = aggregate_single_gradient_using_copy(
group_1_device_grads, False, False)
# Aggregate between the groups.
with ops.device(avail_devices[group_0_main_device]):
(agg_total_grads, _), _ = aggregate_single_gradient_using_copy(
[group_0_agg_grads, group_1_agg_grads], False, False)
# Broadcast the result back into the root of each group.
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads_bcast = array_ops.identity(agg_total_grads)
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads_bcast = array_ops.identity(agg_total_grads)
agg_grads_bcast = []
for j in range(len(single_grads)):
with ops.device(avail_devices[j]):
# Broadcast the result back to each member in the group from the root.
if (group_0_main_device < group_size) == (j < group_size):
src_device_grad = group_0_agg_grads_bcast
else:
src_device_grad = group_1_agg_grads_bcast
agg_grads_bcast.append(array_ops.identity(src_device_grad))
agg_grads.append(
[(g, v) for g, (_, v) in zip(agg_grads_bcast, single_grads)])
agg_grads = list(zip(*agg_grads))
return agg_grads
def aggregate_single_gradient_using_copy(grad_and_vars, use_mean,
check_inf_nan):
"""Calculate the average gradient for a shared variable across all replicas.
Note that this function provides a synchronization point across all replicas.
Args:
grad_and_vars: A list or tuple of (gradient, variable) tuples. Each
(gradient, variable) pair within the outer list represents the gradient
of the variable calculated for a single replica, and the number of pairs
equals the number of replicas.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all replicas. The variable is chosen
from the first replica. has_nan_or_inf indicates whether any gradient contains
a NaN or Inf value.
"""
grads = [g for g, _ in grad_and_vars]
grad = math_ops.add_n(grads)
if use_mean and len(grads) > 1:
grad = array_ops.multiply(grad, 1.0 / len(grads))
v = grad_and_vars[0][1]
if check_inf_nan:
has_nan_or_inf = math_ops.logical_not(
math_ops.reduce_all(math_ops.is_finite(grads)))
return (grad, v), has_nan_or_inf
else:
return (grad, v), None
def group_device_names(devices, group_size):
"""Group device names into groups of group_size.
Args:
devices: a list of canonical device strings.
group_size: integer which is equal to or greater than 1.
Returns:
list of lists of devices, where each inner list is group_size long,
and each device appears at least once in an inner list. If
len(devices) % group_size == 0 then each device will appear exactly once.
Raises:
ValueError: if group_size > len(devices)
"""
num_devices = len(devices)
if group_size > num_devices:
raise ValueError(
'only %d devices, but group_size=%d' % (num_devices, group_size))
num_groups = (
num_devices // group_size + (1 if (num_devices % group_size != 0) else 0))
groups = [[] for i in range(num_groups)]
for i in range(num_groups * group_size):
groups[i % num_groups].append(devices[i % num_devices])
return groups
def split_grads_by_size(threshold_size, device_grads):
"""Break gradients into two sets according to tensor size.
Args:
threshold_size: int size cutoff for small vs large tensor.
device_grads: List of lists of (gradient, variable) tuples. The outer
list is over devices. The inner list is over individual gradients.
Returns:
small_grads: Subset of device_grads where shape is <= threshold_size
elements.
large_grads: Subset of device_grads where shape is > threshold_size
elements.
"""
small_grads = []
large_grads = []
for dl in device_grads:
small_dl = []
large_dl = []
for (g, v) in dl:
tensor_size = g.get_shape().num_elements()
if tensor_size <= threshold_size:
small_dl.append([g, v])
else:
large_dl.append([g, v])
if small_dl:
small_grads.append(small_dl)
if large_dl:
large_grads.append(large_dl)
return small_grads, large_grads
# threading.Lock() and threading.local() cannot be pickled and therefore cannot
# be a field of CollectiveKeys. Right now _thread_local does not need to be
# an instance member of CollectiveKeys since we always create a new thread for
# each replica.
_lock = threading.Lock()
_thread_local = threading.local()
# TODO(yuefengz): use random key starts to avoid reusing keys?
class CollectiveKeys(object):
"""Class that manages collective keys.
We need to manage three different keys for collective ops:
*Group key*: an integer key to identify the set of cooperative devices.
Collective ops that work under the same set of devices must use the same
group key.
*Instance key*: an integer key to identify the set of corresponding tensors
on different devices in a device group that need to be all-reduced.
*Graph key*: an integer key that is unique per graph. This is used to support
multiple graphs per client session. It must be non-zero and set in the
`config` argument of each call to `session.run`.
"""
def __init__(self,
group_key_start=1,
instance_key_start=100,
instance_key_with_id_start=10000):
"""Initializes the object.
Args:
group_key_start: the starting integer of group key.
instance_key_start: the starting integer of instance key.
instance_key_with_id_start: the starting integer of instance key that is
recorded with an id.
"""
self._group_key = group_key_start
self._group_key_table = dict()
# For instance keys with ids
self._instance_key_id_to_key_table = dict()
self._instance_key_with_id_counter = instance_key_with_id_start
# For instance keys without ids
self._instance_key_start = instance_key_start
def _get_thread_local_object(self):
# We make instance key without key ids thread local so that it will work
# with MirroredStrategy and distribute coordinator.
if not hasattr(_thread_local, 'instance_key'):
_thread_local.instance_key = self._instance_key_start
return _thread_local
def get_group_key(self, devices):
"""Returns a group key for the set of devices.
Args:
devices: list of strings naming devices in a collective group.
Returns:
int key uniquely identifying the set of device names.
"""
parsed = [pydev.DeviceSpec.from_string(d) for d in devices]
# In the between-graph replicated training, different workers need to get
# the same device key. So we remove the task_type and task_id from the
# devices.
# TODO(yuefengz): in the in-graph replicated training, we need to include
# task_type and task_id.
names = sorted(['%s:%d' % (d.device_type, d.device_index) for d in parsed])
key_id = ','.join(names)
with _lock:
if key_id not in self._group_key_table:
new_key = self._group_key
self._group_key += 1
self._group_key_table[key_id] = new_key
return self._group_key_table[key_id]
def get_instance_key(self, key_id=None):
"""Returns a new instance key for use in defining a collective op.
Args:
key_id: optional string. If set, key will be recorded and the same key
will be returned when the same key_id is provided. If not, an increasing
instance key will be returned.
"""
if key_id:
with _lock:
if key_id not in self._instance_key_id_to_key_table:
self._instance_key_with_id_counter += 1
self._instance_key_id_to_key_table[key_id] = (
self._instance_key_with_id_counter)
return self._instance_key_id_to_key_table[key_id]
else:
v = self._get_thread_local_object().instance_key
self._get_thread_local_object().instance_key += 1
return v
def build_collective_reduce(input_tensors,
num_workers,
collective_keys,
reduction_op='Add',
unary_op='Id'):
"""Build a subgraph that does one full all-reduce, using the collective Op.
Args:
input_tensors: tensors within a single worker graph that are to be reduced
together; must be one per device.
num_workers: total number of workers with identical independent graphs that
will be doing this same reduction. The reduction will actually include
the corresponding tensors at all these workers.
collective_keys: a CollectiveKeys object.
reduction_op: string naming the reduction op.
unary_op: string naming the unary final op.
Returns:
An array of final tensors, one per device, computed by the full reduction.
Raises:
ValueError: There must be at least two tensors over all the workers.
"""
group_size = len(input_tensors) * num_workers
if group_size < 2:
raise ValueError('num_workers * len(input_tensors) must be 2 or greater')
devices = [t.device for t in input_tensors]
num_devices = len(devices)
group_key = collective_keys.get_group_key(devices)
instance_key = collective_keys.get_instance_key()
out_tensors = []
subdiv_offsets = [0] # TODO(tucker): maybe support non-default subdiv spec
for d in range(num_devices):
with ops.device(devices[d]):
reduce_op = collective_ops.all_reduce(
input_tensors[d], group_size, group_key, instance_key, reduction_op,
unary_op, subdiv_offsets)
out_tensors.append(reduce_op)
return out_tensors
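# Illustrative note (not part of the original module): group_size is
# len(input_tensors) * num_workers, so 4 local tensors on a 2-worker job give
# a collective group of 8 participants; every worker must build this same
# subgraph with matching group and instance keys for the reduction to run.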
def sum_grad_and_var_all_reduce(grad_and_vars,
num_workers,
alg,
gpu_indices,
aux_devices=None,
num_shards=1):
"""Apply all-reduce algorithm over specified gradient tensors."""
with ops.name_scope('allreduce'):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
scaled_grads = [g for g, _ in grad_and_vars]
if alg == 'nccl':
summed_grads = nccl_ops.all_sum(scaled_grads)
elif alg == 'xring':
summed_grads = all_reduce.build_ring_all_reduce(
scaled_grads, num_workers, num_shards, gpu_indices, math_ops.add)
elif alg == 'nccl/xring':
summed_grads = all_reduce.build_nccl_then_ring(scaled_grads, num_shards,
math_ops.add)
elif alg == 'nccl/rechd':
summed_grads = all_reduce.build_nccl_then_recursive_hd(
scaled_grads, math_ops.add)
elif alg == 'nccl/pscpu':
summed_grads = all_reduce.build_nccl_then_shuffle(
scaled_grads, aux_devices, math_ops.add, math_ops.add_n)
elif alg == 'pscpu/pscpu':
second_gather_devices = aux_devices[:num_shards]
summed_grads = all_reduce.build_shuffle_then_shuffle(
scaled_grads, aux_devices, second_gather_devices, math_ops.add_n)
elif alg in ['pscpu', 'psgpu']:
summed_grads = all_reduce.build_shuffle_all_reduce(
scaled_grads, aux_devices, math_ops.add_n)
else:
raise ValueError('unsupported all_reduce alg: %s' % alg)
result = []
for (_, v), g in zip(grad_and_vars, summed_grads):
result.append([g, v])
return result
def sum_gradients_all_reduce(dev_prefixes, replica_grads, num_workers, alg,
num_shards, gpu_indices):
"""Apply all-reduce algorithm over specified gradient tensors.
Args:
dev_prefixes: list of prefix strings to use to generate PS device names.
replica_grads: the gradients to reduce.
num_workers: number of worker processes across entire job.
alg: the all-reduce algorithm to apply.
num_shards: alg-specific sharding factor.
gpu_indices: indices of local GPUs in order usable for ring-reduce.
Returns:
list of lists of summed (gradient, variable) pairs, one inner list per replica.
"""
alg_contains_shuffle = any([n in alg for n in ['pscpu', 'psgpu']])
is_hierarchical = '/' in alg
if 'pscpu' in alg:
aux_devices = [prefix + '/cpu:0' for prefix in dev_prefixes]
elif 'psgpu' in alg:
aux_devices = [
prefix + '/gpu:%d' % i
for i in range(len(gpu_indices))
for prefix in dev_prefixes
]
else:
aux_devices = ['/job:localhost/cpu:0']
# Auxiliary devices for hierarchical all-reduces.
aux_device_groups = group_device_names(
aux_devices, num_shards if alg_contains_shuffle else 1)
group_index = 0
reduced_gv_list = []
for grad_and_vars in zip(*replica_grads):
reduced_gv_list.append(
sum_grad_and_var_all_reduce(
grad_and_vars, num_workers, alg, gpu_indices, aux_devices
if is_hierarchical else aux_device_groups[group_index], num_shards))
group_index = (group_index + 1) % len(aux_device_groups)
new_replica_grads = [list(x) for x in zip(*reduced_gv_list)]
return new_replica_grads
def extract_ranges(index_list, range_size_limit=32):
"""Extract consecutive ranges and singles from index_list.
Args:
index_list: List of monotone increasing non-negative integers.
range_size_limit: Largest size range to return. If a larger
consecutive range exists, it will be returned as multiple
ranges.
Returns:
(ranges, singles) where ranges is a list of [first, last] pairs of
consecutive elements in index_list, and singles is all of the
other elements, in original order.
"""
if not index_list:
return [], []
first = index_list[0]
last = first
ranges = []
singles = []
for i in index_list[1:]:
if i == last + 1 and (last - first) <= range_size_limit:
last = i
else:
if last > first:
ranges.append([first, last])
else:
singles.append(first)
first = i
last = i
if last > first:
ranges.append([first, last])
else:
singles.append(first)
return ranges, singles
GradPackTuple = pycoll.namedtuple('GradPackTuple', 'indices vars shapes')
# MASKED: pack_range function (lines 490-519)
def unpack_grad_tuple(gv, gpt):
"""Unpack a previously packed collection of gradient tensors.
Args:
gv: A (grad, var) pair to be unpacked.
gpt: A GradPackTuple describing the packing operation that produced gv.
Returns:
A list of (grad, var) pairs corresponding to the values that were
originally packed into gv, maybe following subsequent operations like
reduction.
"""
elt_widths = [x.num_elements() for x in gpt.shapes]
with ops.device(gv[0][0].device):
with ops.name_scope('unpack'):
splits = array_ops.split(gv[0], elt_widths)
unpacked_gv = []
for idx, s in enumerate(splits):
unpacked_gv.append((array_ops.reshape(s, gpt.shapes[idx]),
gpt.vars[idx]))
return unpacked_gv
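# Illustrative example (not part of the original module): the split widths come
# from the packed shapes, e.g. shapes [(2, 3), (4,)] give elt_widths [6, 4], so
# a packed vector of 10 elements is split back into a [2, 3] and a [4] gradient
# and re-paired with the variables recorded in the GradPackTuple.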
def pack_small_tensors(replica_grads, max_bytes=0, max_group=0):
"""Concatenate small gradient tensors together for reduction.
Args:
replica_grads: List of lists of (gradient, variable) tuples.
max_bytes: Int giving max number of bytes in a tensor that
may be considered small.
max_group: Int giving max number of small tensors that may be
concatenated into one new tensor.
Returns:
new_replica_grads, packing where new_replica_grads is identical to
replica_grads except that all feasible small_tensors have been removed
from their places and concatenated into larger tensors that are
now in the front of the list for each replica, and packing contains
the data necessary to restore the replica_grads structure.
Look through the first replica for gradients of the same type (float),
and small size, that are all sequential. For each such group,
replace by a new tensor that is a flattened concatenation. Note
that the corresponding variable will be absent, which doesn't matter
because it isn't used during all-reduce.
Requires:
Every gv_list in replicas must have isomorphic structure including identical
tensor sizes and types.
"""
small_indices = []
large_indices = []
for idx, (g, _) in enumerate(replica_grads[0]):
if g.dtype == dtypes.float32 and (4 * g.shape.num_elements()) <= max_bytes:
small_indices.append(idx)
else:
large_indices.append(idx)
small_ranges, small_singles = extract_ranges(
small_indices, range_size_limit=max_group)
large_indices = sorted(large_indices + small_singles)
num_gv = len(replica_grads[0])
packing = {}
if small_ranges:
new_replica_grads = []
for dev_idx, gv_list in enumerate(replica_grads):
assert len(gv_list) == num_gv
new_gv_list = []
for r in small_ranges:
key = '%d:%d' % (dev_idx, len(new_gv_list))
new_gv_list.append((pack_range(key, packing, gv_list, r),
'packing_var_placeholder'))
for i in large_indices:
new_gv_list.append(gv_list[i])
new_replica_grads.append(new_gv_list)
return new_replica_grads, packing
else:
return replica_grads, None
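# Illustrative note (not part of the original module): only float32 gradients
# are considered, and "small" means 4 * num_elements <= max_bytes (4 bytes per
# float32 element). With max_bytes=0 (the default) nothing is packed and the
# input is returned unchanged with packing=None.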
def unpack_small_tensors(replica_grads, packing):
"""Undo the structure alterations to replica_grads done by pack_small_tensors.
Args:
replica_grads: List of List of (grad, var) tuples.
packing: A dict generated by pack_small_tensors describing the changes
it made to replica_grads.
Returns:
new_replica_grads: identical to replica_grads except that concatenations
of small tensors have been split apart and returned to their original
positions, paired with their original variables.
"""
if not packing:
return replica_grads
new_replica_grads = []
num_devices = len(replica_grads)
num_packed = len(packing.keys()) // num_devices
for dev_idx, gv_list in enumerate(replica_grads):
gv_list = list(gv_list)
new_gv_list = gv_list[num_packed:]
for i in range(num_packed):
k = '%d:%d' % (dev_idx, i)
gpt = packing[k]
gv = unpack_grad_tuple(gv_list[i], gpt)
for gi, idx in enumerate(gpt.indices):
assert idx == gpt.indices[gi]
new_gv_list.insert(idx, gv[gi])
new_replica_grads.append(new_gv_list)
return new_replica_grads
def aggregate_tensors_or_indexed_slices(values, accumulation_fn=math_ops.add_n):
"""Aggregate tensors using `accumulation_fn` and IndexedSlices via concat."""
if any(isinstance(v, ops.IndexedSlices) for v in values):
return gradients_impl._AggregateIndexedSlicesGradients(values) # pylint: disable=protected-access
else:
return accumulation_fn(values)
def divide_by_n_tensors_or_indexed_slices(value, n):
if isinstance(value, ops.IndexedSlices):
value = gradients_impl._HandleNestedIndexedSlices(value) # pylint: disable=protected-access
return ops.IndexedSlices(
value.values / n, value.indices, value.dense_shape)
else:
return value / n
def copy_tensor_or_indexed_slices_to_device(value, device):
with ops.device(device):
if isinstance(value, ops.IndexedSlices):
copied_values = array_ops.identity(value.values)
copied_indices = array_ops.identity(value.indices)
copied_shape = array_ops.identity(value.dense_shape)
result = ops.IndexedSlices(copied_values, copied_indices, copied_shape)
else:
result = array_ops.identity(value)
return result
def contains_indexed_slices(value):
"""Check whether the value is `IndexedSlices` or contains `IndexedSlices`."""
if isinstance(value, ops.IndexedSlices):
return True
elif isinstance(value, (list, tuple)) and value:
return any(contains_indexed_slices(v) for v in value)
elif isinstance(value, value_lib.DistributedValues):
return contains_indexed_slices(list(value._index.values())) # pylint: disable=protected-access
else:
return False
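# Illustrative note (not part of the original module): the check recurses into
# lists/tuples and into the per-device values of a DistributedValues object,
# so contains_indexed_slices([dense_tensor, [some_indexed_slices]]) is True.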
|
def pack_range(key, packing, grad_vars, rng):
"""Form the concatenation of a specified range of gradient tensors.
Args:
key: Value under which to store meta-data in packing that will be used
later to restore the grad_var list structure.
packing: Dict holding data describing packed ranges of small tensors.
grad_vars: List of (grad, var) pairs for one replica.
rng: A pair of integers giving the first, last indices of a consecutive
range of tensors to be packed.
Returns:
A tensor that is the concatenation of all the specified small tensors.
"""
to_pack = grad_vars[rng[0]:rng[1] + 1]
members = []
variables = []
restore_shapes = []
with ops.name_scope('pack'):
for g, v in to_pack:
variables.append(v)
restore_shapes.append(g.shape)
with ops.device(g.device):
members.append(array_ops.reshape(g, [-1]))
packing[key] = GradPackTuple(
indices=range(rng[0], rng[1] + 1),
vars=variables,
shapes=restore_shapes)
with ops.device(members[0].device):
return array_ops.concat(members, 0)
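# Illustrative note (not part of the original module): pack_range records a
# GradPackTuple under `key` in `packing` and returns one flat concatenation of
# the selected gradients; unpack_grad_tuple later uses that entry to split the
# reduced vector back into the original shapes and variables.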
| 490 | 519 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for cross_device_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as pycoll
import threading
from tensorflow.python.distribute import all_reduce
from tensorflow.python.distribute import values as value_lib
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nccl_ops
def aggregate_gradients_using_nccl(replica_grads):
"""Aggregate gradients using nccl allreduce."""
agg_all_g_and_v = []
for single_g_and_v in zip(*replica_grads):
single_grads = [g for g, _ in single_g_and_v]
agg_grads = nccl_ops.all_sum(single_grads)
agg_all_g_and_v.append(
[(g, v) for g, (_, v) in zip(agg_grads, single_g_and_v)])
agg_all_g_and_v = list(zip(*agg_all_g_and_v))
return agg_all_g_and_v
def aggregate_gradients_using_hierarchical_copy(avail_devices, replica_grads):
"""Aggregate gradients using hierarchical copies.
Args:
avail_devices: available GPU devices.
replica_grads: List of lists of (gradient, variable) tuples. The outer list
is over replicas. The inner list is over individual gradients.
Returns:
The list of (aggregated_gradient, variable), where the gradient has been
summed across all replicas and the variable is chosen from the first
replica.
"""
# This only works for DGX-1 type of machine topology
# Device peer to peer matrix
# DMA: 0 1 2 3 4 5 6 7
# 0: Y Y Y Y Y N N N
# 1: Y Y Y Y N Y N N
# 2: Y Y Y Y N N Y N
# 3: Y Y Y Y N N N Y
# 4: Y N N N Y Y Y Y
# 5: N Y N N Y Y Y Y
# 6: N N Y N Y Y Y Y
# 7: N N N Y Y Y Y Y
agg_grads = []
num_devices = len(avail_devices)
# In the special case of DGX-1 machine topology, the two groups have equal
# size.
group_size = num_devices // 2
for i, single_grads in enumerate(zip(*replica_grads)):
group_0_main_device = i % num_devices
group_1_main_device = (group_0_main_device + group_size) % num_devices
if group_0_main_device < group_size:
group_0_begin = 0
group_1_begin = group_size
else:
group_0_begin = group_size
group_1_begin = 0
# Aggregate the first group.
group_0_device_grads = single_grads[group_0_begin:
group_0_begin + group_size]
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads, _ = aggregate_single_gradient_using_copy(
group_0_device_grads, False, False)
# Aggregate the second group.
group_1_device_grads = single_grads[group_1_begin:
group_1_begin + group_size]
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads, _ = aggregate_single_gradient_using_copy(
group_1_device_grads, False, False)
# Aggregate between the groups.
with ops.device(avail_devices[group_0_main_device]):
(agg_total_grads, _), _ = aggregate_single_gradient_using_copy(
[group_0_agg_grads, group_1_agg_grads], False, False)
# Broadcast the result back into the root of each group.
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads_bcast = array_ops.identity(agg_total_grads)
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads_bcast = array_ops.identity(agg_total_grads)
agg_grads_bcast = []
for j in range(len(single_grads)):
with ops.device(avail_devices[j]):
# Broadcast the result back to each member in the group from the root.
if (group_0_main_device < group_size) == (j < group_size):
src_device_grad = group_0_agg_grads_bcast
else:
src_device_grad = group_1_agg_grads_bcast
agg_grads_bcast.append(array_ops.identity(src_device_grad))
agg_grads.append(
[(g, v) for g, (_, v) in zip(agg_grads_bcast, single_grads)])
agg_grads = list(zip(*agg_grads))
return agg_grads
def aggregate_single_gradient_using_copy(grad_and_vars, use_mean,
check_inf_nan):
"""Calculate the average gradient for a shared variable across all replicas.
Note that this function provides a synchronization point across all replicas.
Args:
grad_and_vars: A list or tuple of (gradient, variable) tuples. Each
(gradient, variable) pair within the outer list represents the gradient
of the variable calculated for a single replica, and the number of pairs
equals the number of replicas.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all replicas. The variable is chosen
from the first replica. has_nan_or_inf indicates whether any gradient contains
a NaN or Inf value.
"""
grads = [g for g, _ in grad_and_vars]
grad = math_ops.add_n(grads)
if use_mean and len(grads) > 1:
grad = array_ops.multiply(grad, 1.0 / len(grads))
v = grad_and_vars[0][1]
if check_inf_nan:
has_nan_or_inf = math_ops.logical_not(
math_ops.reduce_all(math_ops.is_finite(grads)))
return (grad, v), has_nan_or_inf
else:
return (grad, v), None
def group_device_names(devices, group_size):
"""Group device names into groups of group_size.
Args:
devices: a list of canonical device strings.
group_size: integer which is equal to or greater than 1.
Returns:
list of lists of devices, where each inner list is group_size long,
and each device appears at least once in an inner list. If
len(devices) % group_size == 0 then each device will appear exactly once.
Raises:
ValueError: if group_size > len(devices)
"""
num_devices = len(devices)
if group_size > num_devices:
raise ValueError(
'only %d devices, but group_size=%d' % (num_devices, group_size))
num_groups = (
num_devices // group_size + (1 if (num_devices % group_size != 0) else 0))
groups = [[] for i in range(num_groups)]
for i in range(num_groups * group_size):
groups[i % num_groups].append(devices[i % num_devices])
return groups
def split_grads_by_size(threshold_size, device_grads):
"""Break gradients into two sets according to tensor size.
Args:
threshold_size: int size cutoff for small vs large tensor.
device_grads: List of lists of (gradient, variable) tuples. The outer
list is over devices. The inner list is over individual gradients.
Returns:
small_grads: Subset of device_grads where shape is <= threshold_size
elements.
large_grads: Subset of device_grads where shape is > threshold_size
elements.
"""
small_grads = []
large_grads = []
for dl in device_grads:
small_dl = []
large_dl = []
for (g, v) in dl:
tensor_size = g.get_shape().num_elements()
if tensor_size <= threshold_size:
small_dl.append([g, v])
else:
large_dl.append([g, v])
if small_dl:
small_grads.append(small_dl)
if large_dl:
large_grads.append(large_dl)
return small_grads, large_grads
# threading.Lock() and threading.local() cannot be pickled and therefore cannot
# be a field of CollectiveKeys. Right now _thread_local does not need to be
# an instance member of CollectiveKeys since we always create a new thread for
# each replica.
_lock = threading.Lock()
_thread_local = threading.local()
# TODO(yuefengz): use random key starts to avoid reusing keys?
class CollectiveKeys(object):
"""Class that manages collective keys.
We need to manage three different keys for collective ops:
*Group key*: an integer key to identify the set of cooperative devices.
Collective ops that work under the same set of devices must use the same
group key.
*Instance key*: an integer key to identify the set of corresponding tensors
on different devices in a device group that need to be all-reduced.
*Graph key*: an integer key that is unique per graph. This is used to support
multiple graphs per client session. It must be non-zero and set in the
`config` argument of each call to `session.run`.
"""
def __init__(self,
group_key_start=1,
instance_key_start=100,
instance_key_with_id_start=10000):
"""Initializes the object.
Args:
group_key_start: the starting integer of group key.
instance_key_start: the starting integer of instance key.
instance_key_with_id_start: the starting integer of instance key that is
recorded with an id.
"""
self._group_key = group_key_start
self._group_key_table = dict()
# For instance keys with ids
self._instance_key_id_to_key_table = dict()
self._instance_key_with_id_counter = instance_key_with_id_start
# For instance keys without ids
self._instance_key_start = instance_key_start
def _get_thread_local_object(self):
# We make instance key without key ids thread local so that it will work
# with MirroredStrategy and distribute coordinator.
if not hasattr(_thread_local, 'instance_key'):
_thread_local.instance_key = self._instance_key_start
return _thread_local
def get_group_key(self, devices):
"""Returns a group key for the set of devices.
Args:
devices: list of strings naming devices in a collective group.
Returns:
int key uniquely identifying the set of device names.
"""
parsed = [pydev.DeviceSpec.from_string(d) for d in devices]
# In the between-graph replicated training, different workers need to get
# the same device key. So we remove the task_type and task_id from the
# devices.
# TODO(yuefengz): in the in-graph replicated training, we need to include
# task_type and task_id.
names = sorted(['%s:%d' % (d.device_type, d.device_index) for d in parsed])
key_id = ','.join(names)
with _lock:
if key_id not in self._group_key_table:
new_key = self._group_key
self._group_key += 1
self._group_key_table[key_id] = new_key
return self._group_key_table[key_id]
def get_instance_key(self, key_id=None):
"""Returns a new instance key for use in defining a collective op.
Args:
key_id: optional string. If set, key will be recorded and the same key
will be returned when the same key_id is provided. If not, an increasing
instance key will be returned.
"""
if key_id:
with _lock:
if key_id not in self._instance_key_id_to_key_table:
self._instance_key_with_id_counter += 1
self._instance_key_id_to_key_table[key_id] = (
self._instance_key_with_id_counter)
return self._instance_key_id_to_key_table[key_id]
else:
v = self._get_thread_local_object().instance_key
self._get_thread_local_object().instance_key += 1
return v
def build_collective_reduce(input_tensors,
num_workers,
collective_keys,
reduction_op='Add',
unary_op='Id'):
"""Build a subgraph that does one full all-reduce, using the collective Op.
Args:
input_tensors: tensors within a single worker graph that are to be reduced
together; must be one per device.
num_workers: total number of workers with identical independent graphs that
will be doing this same reduction. The reduction will actually include
the corresponding tensors at all these workers.
collective_keys: a CollectiveKeys object.
reduction_op: string naming the reduction op.
unary_op: string naming the unary final op.
Returns:
An array of final tensors, one per device, computed by the full reduction.
Raises:
ValueError: There must be at least two tensors over all the workers.
"""
group_size = len(input_tensors) * num_workers
if group_size < 2:
raise ValueError('num_workers * len(input_tensors) must be 2 or greater')
devices = [t.device for t in input_tensors]
num_devices = len(devices)
group_key = collective_keys.get_group_key(devices)
instance_key = collective_keys.get_instance_key()
out_tensors = []
subdiv_offsets = [0] # TODO(tucker): maybe support non-default subdiv spec
for d in range(num_devices):
with ops.device(devices[d]):
reduce_op = collective_ops.all_reduce(
input_tensors[d], group_size, group_key, instance_key, reduction_op,
unary_op, subdiv_offsets)
out_tensors.append(reduce_op)
return out_tensors
def sum_grad_and_var_all_reduce(grad_and_vars,
num_workers,
alg,
gpu_indices,
aux_devices=None,
num_shards=1):
"""Apply all-reduce algorithm over specified gradient tensors."""
with ops.name_scope('allreduce'):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
scaled_grads = [g for g, _ in grad_and_vars]
if alg == 'nccl':
summed_grads = nccl_ops.all_sum(scaled_grads)
elif alg == 'xring':
summed_grads = all_reduce.build_ring_all_reduce(
scaled_grads, num_workers, num_shards, gpu_indices, math_ops.add)
elif alg == 'nccl/xring':
summed_grads = all_reduce.build_nccl_then_ring(scaled_grads, num_shards,
math_ops.add)
elif alg == 'nccl/rechd':
summed_grads = all_reduce.build_nccl_then_recursive_hd(
scaled_grads, math_ops.add)
elif alg == 'nccl/pscpu':
summed_grads = all_reduce.build_nccl_then_shuffle(
scaled_grads, aux_devices, math_ops.add, math_ops.add_n)
elif alg == 'pscpu/pscpu':
second_gather_devices = aux_devices[:num_shards]
summed_grads = all_reduce.build_shuffle_then_shuffle(
scaled_grads, aux_devices, second_gather_devices, math_ops.add_n)
elif alg in ['pscpu', 'psgpu']:
summed_grads = all_reduce.build_shuffle_all_reduce(
scaled_grads, aux_devices, math_ops.add_n)
else:
raise ValueError('unsupported all_reduce alg: %s' % alg)
result = []
for (_, v), g in zip(grad_and_vars, summed_grads):
result.append([g, v])
return result
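# Illustrative note (not part of the original module): valid `alg` values are
# 'nccl', 'xring', 'nccl/xring', 'nccl/rechd', 'nccl/pscpu', 'pscpu/pscpu',
# 'pscpu' and 'psgpu'; anything else raises ValueError. The result pairs each
# summed gradient with the variable from the corresponding input entry, as
# [grad, var] lists in the original order.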
def sum_gradients_all_reduce(dev_prefixes, replica_grads, num_workers, alg,
num_shards, gpu_indices):
"""Apply all-reduce algorithm over specified gradient tensors.
Args:
dev_prefixes: list of prefix strings to use to generate PS device names.
replica_grads: the gradients to reduce.
num_workers: number of worker processes across entire job.
alg: the all-reduce algorithm to apply.
num_shards: alg-specific sharding factor.
gpu_indices: indices of local GPUs in order usable for ring-reduce.
Returns:
list of lists of summed (gradient, variable) pairs, one inner list per replica.
"""
alg_contains_shuffle = any([n in alg for n in ['pscpu', 'psgpu']])
is_hierarchical = '/' in alg
if 'pscpu' in alg:
aux_devices = [prefix + '/cpu:0' for prefix in dev_prefixes]
elif 'psgpu' in alg:
aux_devices = [
prefix + '/gpu:%d' % i
for i in range(len(gpu_indices))
for prefix in dev_prefixes
]
else:
aux_devices = ['/job:localhost/cpu:0']
# Auxiliary devices for hierarchical all-reduces.
aux_device_groups = group_device_names(
aux_devices, num_shards if alg_contains_shuffle else 1)
group_index = 0
reduced_gv_list = []
for grad_and_vars in zip(*replica_grads):
reduced_gv_list.append(
sum_grad_and_var_all_reduce(
grad_and_vars, num_workers, alg, gpu_indices, aux_devices
if is_hierarchical else aux_device_groups[group_index], num_shards))
group_index = (group_index + 1) % len(aux_device_groups)
new_replica_grads = [list(x) for x in zip(*reduced_gv_list)]
return new_replica_grads
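# Illustrative example (not part of the original module; device strings are
# hypothetical): with dev_prefixes=['/job:worker/task:0', '/job:worker/task:1']
# and two local GPUs, a 'psgpu' algorithm builds aux_devices
#   ['/job:worker/task:0/gpu:0', '/job:worker/task:1/gpu:0',
#    '/job:worker/task:0/gpu:1', '/job:worker/task:1/gpu:1']
# whereas 'pscpu' uses one '/cpu:0' device per prefix.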
def extract_ranges(index_list, range_size_limit=32):
"""Extract consecutive ranges and singles from index_list.
Args:
index_list: List of monotone increasing non-negative integers.
range_size_limit: Largest size range to return. If a larger
consecutive range exists, it will be returned as multiple
ranges.
Returns:
(ranges, singles) where ranges is a list of [first, last] pairs of
consecutive elements in index_list, and singles is all of the
other elements, in original order.
"""
if not index_list:
return [], []
first = index_list[0]
last = first
ranges = []
singles = []
for i in index_list[1:]:
if i == last + 1 and (last - first) <= range_size_limit:
last = i
else:
if last > first:
ranges.append([first, last])
else:
singles.append(first)
first = i
last = i
if last > first:
ranges.append([first, last])
else:
singles.append(first)
return ranges, singles
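# A minimal usage sketch of extract_ranges, added for illustration; the helper
# name _extract_ranges_example is hypothetical and not part of the upstream
# module.
def _extract_ranges_example():
  ranges, singles = extract_ranges([1, 2, 3, 5, 7, 8], range_size_limit=32)
  # ranges  == [[1, 3], [7, 8]]  consecutive runs, returned as [first, last]
  # singles == [5]               isolated indices, in original order
  return ranges, singles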
GradPackTuple = pycoll.namedtuple('GradPackTuple', 'indices vars shapes')
def pack_range(key, packing, grad_vars, rng):
"""Form the concatenation of a specified range of gradient tensors.
Args:
key: Value under which to store meta-data in packing that will be used
later to restore the grad_var list structure.
packing: Dict holding data describing packed ranges of small tensors.
grad_vars: List of (grad, var) pairs for one replica.
rng: A pair of integers giving the first, last indices of a consecutive
range of tensors to be packed.
Returns:
A tensor that is the concatenation of all the specified small tensors.
"""
to_pack = grad_vars[rng[0]:rng[1] + 1]
members = []
variables = []
restore_shapes = []
with ops.name_scope('pack'):
for g, v in to_pack:
variables.append(v)
restore_shapes.append(g.shape)
with ops.device(g.device):
members.append(array_ops.reshape(g, [-1]))
packing[key] = GradPackTuple(
indices=range(rng[0], rng[1] + 1),
vars=variables,
shapes=restore_shapes)
with ops.device(members[0].device):
return array_ops.concat(members, 0)
def unpack_grad_tuple(gv, gpt):
"""Unpack a previously packed collection of gradient tensors.
Args:
gv: A (grad, var) pair to be unpacked.
gpt: A GradPackTuple describing the packing operation that produced gv.
Returns:
A list of (grad, var) pairs corresponding to the values that were
originally packed into gv, maybe following subsequent operations like
reduction.
"""
elt_widths = [x.num_elements() for x in gpt.shapes]
with ops.device(gv[0][0].device):
with ops.name_scope('unpack'):
splits = array_ops.split(gv[0], elt_widths)
unpacked_gv = []
for idx, s in enumerate(splits):
unpacked_gv.append((array_ops.reshape(s, gpt.shapes[idx]),
gpt.vars[idx]))
return unpacked_gv
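# Illustrative round trip through pack_range and unpack_grad_tuple, assuming
# plain string stand-ins for the variables (only the gradients matter here).
# The helper name _pack_unpack_example is hypothetical, not upstream code.
def _pack_unpack_example():
  g0 = array_ops.ones([2, 3])  # 6 elements
  g1 = array_ops.zeros([4])    # 4 elements
  gv_list = [(g0, 'var0'), (g1, 'var1')]
  packing = {}
  packed = pack_range('0:0', packing, gv_list, [0, 1])  # flat tensor, shape [10]
  # Pretend the packed tensor went through all-reduce unchanged, then unpack.
  unpacked = unpack_grad_tuple((packed, 'packing_var_placeholder'),
                               packing['0:0'])
  # unpacked == [(tensor of shape [2, 3], 'var0'), (tensor of shape [4], 'var1')]
  return unpacked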
def pack_small_tensors(replica_grads, max_bytes=0, max_group=0):
"""Concatenate small gradient tensors together for reduction.
Args:
replica_grads: List of lists of (gradient, variable) tuples.
max_bytes: Int giving max number of bytes in a tensor that
may be considered small.
max_group: Int giving max number of small tensors that may be
concatenated into one new tensor.
Returns:
new_replica_grads, packing where new_replica_grads is identical to
replica_grads except that all feasible small_tensors have been removed
from their places and concatenated into larger tensors that are
now in the front of the list for each replica, and packing contains
the data necessary to restore the replica_grads structure.
Look through the first replica for gradients of the same type (float),
and small size, that are all sequential. For each such group,
replace by a new tensor that is a flattened concatenation. Note
that the corresponding variable will be absent, which doesn't matter
because it isn't used during all-reduce.
Requires:
Every gv_list in replicas must have isomorphic structure including identical
tensor sizes and types.
"""
small_indices = []
large_indices = []
for idx, (g, _) in enumerate(replica_grads[0]):
if g.dtype == dtypes.float32 and (4 * g.shape.num_elements()) <= max_bytes:
small_indices.append(idx)
else:
large_indices.append(idx)
small_ranges, small_singles = extract_ranges(
small_indices, range_size_limit=max_group)
large_indices = sorted(large_indices + small_singles)
num_gv = len(replica_grads[0])
packing = {}
if small_ranges:
new_replica_grads = []
for dev_idx, gv_list in enumerate(replica_grads):
assert len(gv_list) == num_gv
new_gv_list = []
for r in small_ranges:
key = '%d:%d' % (dev_idx, len(new_gv_list))
new_gv_list.append((pack_range(key, packing, gv_list, r),
'packing_var_placeholder'))
for i in large_indices:
new_gv_list.append(gv_list[i])
new_replica_grads.append(new_gv_list)
return new_replica_grads, packing
else:
return replica_grads, None
def unpack_small_tensors(replica_grads, packing):
"""Undo the structure alterations to replica_grads done by pack_small_tensors.
Args:
replica_grads: List of List of (grad, var) tuples.
packing: A dict generated by pack_small_tensors describing the changes
it made to replica_grads.
Returns:
new_replica_grads: identical to replica_grads except that concatenations
of small tensors have been split apart and returned to their original
positions, paired with their original variables.
"""
if not packing:
return replica_grads
new_replica_grads = []
num_devices = len(replica_grads)
num_packed = len(packing.keys()) // num_devices
for dev_idx, gv_list in enumerate(replica_grads):
gv_list = list(gv_list)
new_gv_list = gv_list[num_packed:]
for i in range(num_packed):
k = '%d:%d' % (dev_idx, i)
gpt = packing[k]
gv = unpack_grad_tuple(gv_list[i], gpt)
for gi, idx in enumerate(gpt.indices):
assert idx == gpt.indices[gi]
new_gv_list.insert(idx, gv[gi])
new_replica_grads.append(new_gv_list)
return new_replica_grads
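# Hypothetical single-replica sketch of the pack/unpack round trip: gradients
# at or under max_bytes (64 bytes here, i.e. 16 float32 elements) are packed
# before reduction and restored afterwards. Not part of the upstream module.
def _pack_small_tensors_example():
  replica_grads = [[(array_ops.ones([2]), 'v0'),      # 8 bytes  -> small
                    (array_ops.ones([3]), 'v1'),      # 12 bytes -> small
                    (array_ops.ones([100]), 'v2')]]   # 400 bytes -> large
  packed, packing = pack_small_tensors(replica_grads, max_bytes=64, max_group=16)
  # packed[0][0] is a flat 5-element tensor holding v0's and v1's gradients;
  # the large gradient for 'v2' follows it untouched.
  restored = unpack_small_tensors(packed, packing)
  # restored[0] has the original structure: shapes [2], [3], [100] paired with
  # 'v0', 'v1', 'v2'.
  return restored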
def aggregate_tensors_or_indexed_slices(values, accumulation_fn=math_ops.add_n):
"""Aggregate tensors using `accumulation_fn` and IndexedSlices via concat."""
if any(isinstance(v, ops.IndexedSlices) for v in values):
return gradients_impl._AggregateIndexedSlicesGradients(values) # pylint: disable=protected-access
else:
return accumulation_fn(values)
def divide_by_n_tensors_or_indexed_slices(value, n):
if isinstance(value, ops.IndexedSlices):
value = gradients_impl._HandleNestedIndexedSlices(value) # pylint: disable=protected-access
return ops.IndexedSlices(
value.values / n, value.indices, value.dense_shape)
else:
return value / n
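# Minimal sketch of the dense path: tensors are summed with accumulation_fn and
# the result is then averaged. The helper name is hypothetical.
def _aggregate_and_average_example():
  values = [array_ops.ones([3]), array_ops.ones([3])]
  total = aggregate_tensors_or_indexed_slices(values)               # [2., 2., 2.]
  mean = divide_by_n_tensors_or_indexed_slices(total, len(values))  # [1., 1., 1.]
  return mean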
def copy_tensor_or_indexed_slices_to_device(value, device):
with ops.device(device):
if isinstance(value, ops.IndexedSlices):
copied_values = array_ops.identity(value.values)
copied_indices = array_ops.identity(value.indices)
copied_shape = array_ops.identity(value.dense_shape)
result = ops.IndexedSlices(copied_values, copied_indices, copied_shape)
else:
result = array_ops.identity(value)
return result
def contains_indexed_slices(value):
"""Check whether the value is `IndexedSlices` or contains `IndexedSlices`."""
if isinstance(value, ops.IndexedSlices):
return True
elif isinstance(value, (list, tuple)) and value:
return any(contains_indexed_slices(v) for v in value)
elif isinstance(value, value_lib.DistributedValues):
return contains_indexed_slices(list(value._index.values())) # pylint: disable=protected-access
else:
return False
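# Hypothetical sketch: contains_indexed_slices walks nested lists/tuples, so a
# single IndexedSlices anywhere in the structure is enough to return True.
def _contains_indexed_slices_example():
  dense = array_ops.ones([2, 2])
  sparse = ops.IndexedSlices(values=array_ops.ones([1, 2]),
                             indices=ops.convert_to_tensor([0]))
  assert not contains_indexed_slices([dense, dense])
  assert contains_indexed_slices([dense, [sparse]])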
|
unpack_grad_tuple
|
Unpack a previously packed collection of gradient tensors.
Args:
gv: A (grad, var) pair to be unpacked.
gpt: A GradPackTuple describing the packing operation that produced gv.
Returns:
A list of (grad, var) pairs corresponding to the values that were
originally packed into gv, maybe following subsequent operations like
reduction.
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for cross_device_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as pycoll
import threading
from tensorflow.python.distribute import all_reduce
from tensorflow.python.distribute import values as value_lib
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nccl_ops
def aggregate_gradients_using_nccl(replica_grads):
"""Aggregate gradients using nccl allreduce."""
agg_all_g_and_v = []
for single_g_and_v in zip(*replica_grads):
single_grads = [g for g, _ in single_g_and_v]
agg_grads = nccl_ops.all_sum(single_grads)
agg_all_g_and_v.append(
[(g, v) for g, (_, v) in zip(agg_grads, single_g_and_v)])
agg_all_g_and_v = list(zip(*agg_all_g_and_v))
return agg_all_g_and_v
def aggregate_gradients_using_hierarchical_copy(avail_devices, replica_grads):
"""Aggregate gradients using hierarchical copies.
Args:
avail_devices: available GPU devices.
replica_grads: List of lists of (gradient, variable) tuples. The outer list
is over replicas. The inner list is over individual gradients.
Returns:
The list of (aggregated_gradient, variable), where the gradient has been
summed across all replicas and the variable is chosen from the first
replica.
"""
# This only works for DGX-1 type of machine topology
# Device peer to peer matrix
# DMA: 0 1 2 3 4 5 6 7
# 0: Y Y Y Y Y N N N
# 1: Y Y Y Y N Y N N
# 2: Y Y Y Y N N Y N
# 3: Y Y Y Y N N N Y
# 4: Y N N N Y Y Y Y
# 5: N Y N N Y Y Y Y
# 6: N N Y N Y Y Y Y
# 7: N N N Y Y Y Y Y
agg_grads = []
num_devices = len(avail_devices)
# In the special case of DGX-1 machine topology, the two groups have equal
# size.
group_size = num_devices // 2
for i, single_grads in enumerate(zip(*replica_grads)):
group_0_main_device = i % num_devices
group_1_main_device = (group_0_main_device + group_size) % num_devices
if group_0_main_device < group_size:
group_0_begin = 0
group_1_begin = group_size
else:
group_0_begin = group_size
group_1_begin = 0
# Aggregate the first group.
group_0_device_grads = single_grads[group_0_begin:
group_0_begin + group_size]
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads, _ = aggregate_single_gradient_using_copy(
group_0_device_grads, False, False)
# Aggregate the second group.
group_1_device_grads = single_grads[group_1_begin:
group_1_begin + group_size]
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads, _ = aggregate_single_gradient_using_copy(
group_1_device_grads, False, False)
# Aggregate between the groups.
with ops.device(avail_devices[group_0_main_device]):
(agg_total_grads, _), _ = aggregate_single_gradient_using_copy(
[group_0_agg_grads, group_1_agg_grads], False, False)
# Broadcast the result back into the root of each group.
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads_bcast = array_ops.identity(agg_total_grads)
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads_bcast = array_ops.identity(agg_total_grads)
agg_grads_bcast = []
for j in range(len(single_grads)):
with ops.device(avail_devices[j]):
# Broadcast the result back to each member in the group from the root.
if (group_0_main_device < group_size) == (j < group_size):
src_device_grad = group_0_agg_grads_bcast
else:
src_device_grad = group_1_agg_grads_bcast
agg_grads_bcast.append(array_ops.identity(src_device_grad))
agg_grads.append(
[(g, v) for g, (_, v) in zip(agg_grads_bcast, single_grads)])
agg_grads = list(zip(*agg_grads))
return agg_grads
def aggregate_single_gradient_using_copy(grad_and_vars, use_mean,
check_inf_nan):
"""Calculate the average gradient for a shared variable across all replicas.
Note that this function provides a synchronization point across all replicas.
Args:
grad_and_vars: A list or tuple of (gradient, variable) tuples. Each
(gradient, variable) pair within the outer list represents the gradient
of the variable calculated for a single replica, and the number of pairs
equals the number of replicas.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all replicas. The variable is chosen
from the first replica. The has_nan_or_inf output indicates whether any of the
gradients contains a NaN or an Inf.
"""
grads = [g for g, _ in grad_and_vars]
grad = math_ops.add_n(grads)
if use_mean and len(grads) > 1:
grad = array_ops.multiply(grad, 1.0 / len(grads))
v = grad_and_vars[0][1]
if check_inf_nan:
has_nan_or_inf = array_ops.logical_not(
array_ops.reduce_all(array_ops.is_finite(grads)))
return (grad, v), has_nan_or_inf
else:
return (grad, v), None
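# Hypothetical usage sketch: average one variable's gradient from two replicas.
# With use_mean=True the sum is scaled by 1/len(grads); with check_inf_nan=False
# the second return value is None. The stand-in variable is just carried through.
def _aggregate_single_gradient_example():
  v = 'weights'
  grad_and_vars = [(array_ops.ones([4]), v),
                   (array_ops.ones([4]) * 3.0, v)]
  (avg_grad, var), has_nan_or_inf = aggregate_single_gradient_using_copy(
      grad_and_vars, use_mean=True, check_inf_nan=False)
  # avg_grad == [2., 2., 2., 2.], var == 'weights', has_nan_or_inf is None
  return avg_grad, var, has_nan_or_inf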
def group_device_names(devices, group_size):
"""Group device names into groups of group_size.
Args:
devices: a list of canonical device strings.
group_size: integer which is equal to or greater than 1.
Returns:
list of lists of devices, where each inner list is group_size long,
and each device appears at least once in an inner list. If
len(devices) % group_size == 0 then each device will appear exactly once.
Raises:
ValueError: if group_size > len(devices)
"""
num_devices = len(devices)
if group_size > num_devices:
raise ValueError(
'only %d devices, but group_size=%d' % (num_devices, group_size))
num_groups = (
num_devices // group_size + (1 if (num_devices % group_size != 0) else 0))
groups = [[] for i in range(num_groups)]
for i in range(num_groups * group_size):
groups[i % num_groups].append(devices[i % num_devices])
return groups
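# Pure-Python usage sketch (hypothetical helper): three devices grouped with
# group_size=2 wrap around so that every device appears at least once.
def _group_device_names_example():
  groups = group_device_names(
      ['/job:worker/cpu:0', '/job:worker/gpu:0', '/job:worker/gpu:1'], 2)
  # groups == [['/job:worker/cpu:0', '/job:worker/gpu:1'],
  #            ['/job:worker/gpu:0', '/job:worker/cpu:0']]
  return groups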
def split_grads_by_size(threshold_size, device_grads):
"""Break gradients into two sets according to tensor size.
Args:
threshold_size: int size cutoff for small vs large tensor.
device_grads: List of lists of (gradient, variable) tuples. The outer
list is over devices. The inner list is over individual gradients.
Returns:
small_grads: Subset of device_grads where shape is <= threshold_size
elements.
large_grads: Subset of device_grads where shape is > threshold_size
elements.
"""
small_grads = []
large_grads = []
for dl in device_grads:
small_dl = []
large_dl = []
for (g, v) in dl:
tensor_size = g.get_shape().num_elements()
if tensor_size <= threshold_size:
small_dl.append([g, v])
else:
large_dl.append([g, v])
if small_dl:
small_grads.append(small_dl)
if large_dl:
large_grads.append(large_dl)
return small_grads, large_grads
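# Hypothetical sketch: with threshold_size=10 elements, a [2, 3] gradient (6
# elements) goes to small_grads and a [4, 4] gradient (16 elements) goes to
# large_grads.
def _split_grads_by_size_example():
  device_grads = [[(array_ops.ones([2, 3]), 'v_small'),
                   (array_ops.ones([4, 4]), 'v_large')]]
  small_grads, large_grads = split_grads_by_size(10, device_grads)
  # small_grads == [[[tensor of shape [2, 3], 'v_small']]]
  # large_grads == [[[tensor of shape [4, 4], 'v_large']]]
  return small_grads, large_grads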
# threading.Lock() and threading.local() cannot be pickled and therefore cannot
# be a field of CollectiveKeys. Right now _thread_local does not need to be an
# instance member of CollectiveKeys since we always create a new thread for
# each replica.
_lock = threading.Lock()
_thread_local = threading.local()
# TODO(yuefengz): use random key starts to avoid reusing keys?
class CollectiveKeys(object):
"""Class that manages collective keys.
We need to manage three different keys for collective:
*Group key*: an integer key to identify the set of cooperative devices.
Collective ops that run on the same set of devices must use the same group
key.
*Instance key*: an integer key to identify the set of counterpart tensors on
different devices in a device group that need to be all-reduced together.
*Graph key*: an integer key that is unique per graph. This is used to support
multiple graphs per client session. It must be non-zero and set in the
`config` argument of each call to `session.run`.
"""
def __init__(self,
group_key_start=1,
instance_key_start=100,
instance_key_with_id_start=10000):
"""Initializes the object.
Args:
group_key_start: the starting integer of group key.
instance_key_start: the starting integer of instance key.
instance_key_with_id_start: the starting integer of instance key that is
recorded with an id.
"""
self._group_key = group_key_start
self._group_key_table = dict()
# For instance keys with ids
self._instance_key_id_to_key_table = dict()
self._instance_key_with_id_counter = instance_key_with_id_start
# For instance keys without ids
self._instance_key_start = instance_key_start
def _get_thread_local_object(self):
# We make instance key without key ids thread local so that it will work
# with MirroredStrategy and distribute coordinator.
if not hasattr(_thread_local, 'instance_key'):
_thread_local.instance_key = self._instance_key_start
return _thread_local
def get_group_key(self, devices):
"""Returns a group key for the set of devices.
Args:
devices: list of strings naming devices in a collective group.
Returns:
int key uniquely identifying the set of device names.
"""
parsed = [pydev.DeviceSpec.from_string(d) for d in devices]
# In the between-graph replicated training, different workers need to get
# the same device key. So we remove the task_type and task_id from the
# devices.
# TODO(yuefengz): in the in-graph replicated training, we need to include
# task_type and task_id.
names = sorted(['%s:%d' % (d.device_type, d.device_index) for d in parsed])
key_id = ','.join(names)
with _lock:
if key_id not in self._group_key_table:
new_key = self._group_key
self._group_key += 1
self._group_key_table[key_id] = new_key
return self._group_key_table[key_id]
def get_instance_key(self, key_id=None):
"""Returns a new instance key for use in defining a collective op.
Args:
key_id: optional string. If set, key will be recorded and the same key
will be returned when the same key_id is provided. If not, an increasing
instance key will be returned.
"""
if key_id:
with _lock:
if key_id not in self._instance_key_id_to_key_table:
self._instance_key_with_id_counter += 1
self._instance_key_id_to_key_table[key_id] = (
self._instance_key_with_id_counter)
return self._instance_key_id_to_key_table[key_id]
else:
v = self._get_thread_local_object().instance_key
self._get_thread_local_object().instance_key += 1
return v
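# Hypothetical usage sketch of CollectiveKeys: the same device set always maps
# to the same group key, and get_instance_key() without a key_id hands out
# increasing per-thread keys.
def _collective_keys_example():
  keys = CollectiveKeys()
  devices = ['/job:worker/replica:0/task:0/device:GPU:0',
             '/job:worker/replica:0/task:0/device:GPU:1']
  group_key = keys.get_group_key(devices)
  assert group_key == keys.get_group_key(devices)  # stable for the same set
  first = keys.get_instance_key()
  second = keys.get_instance_key()
  assert second == first + 1
  return group_key, first, second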
def build_collective_reduce(input_tensors,
num_workers,
collective_keys,
reduction_op='Add',
unary_op='Id'):
"""Build a subgraph that does one full all-reduce, using the collective Op.
Args:
input_tensors: tensors within a single worker graph that are to be reduced
together; must be one per device.
num_workers: total number of workers with identical independent graphs that
will be doing this same reduction. The reduction will actually include
the corresponding tensors at all these workers.
collective_keys: a CollectiveKeys object.
reduction_op: string naming the reduction op.
unary_op: string naming the unary final op.
Returns:
An array of final tensors, one per device, computed by the full reduction.
Raises:
ValueError: There must be at least two tensors over all the workers.
"""
group_size = len(input_tensors) * num_workers
if group_size < 2:
raise ValueError('num_workers * len(input_tensors) must be 2 or greater')
devices = [t.device for t in input_tensors]
num_devices = len(devices)
group_key = collective_keys.get_group_key(devices)
instance_key = collective_keys.get_instance_key()
out_tensors = []
subdiv_offsets = [0] # TODO(tucker): maybe support non-default subdiv spec
for d in range(num_devices):
with ops.device(devices[d]):
reduce_op = collective_ops.all_reduce(
input_tensors[d], group_size, group_key, instance_key, reduction_op,
unary_op, subdiv_offsets)
out_tensors.append(reduce_op)
return out_tensors
def sum_grad_and_var_all_reduce(grad_and_vars,
num_workers,
alg,
gpu_indices,
aux_devices=None,
num_shards=1):
"""Apply all-reduce algorithm over specified gradient tensors."""
with ops.name_scope('allreduce'):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
scaled_grads = [g for g, _ in grad_and_vars]
if alg == 'nccl':
summed_grads = nccl_ops.all_sum(scaled_grads)
elif alg == 'xring':
summed_grads = all_reduce.build_ring_all_reduce(
scaled_grads, num_workers, num_shards, gpu_indices, math_ops.add)
elif alg == 'nccl/xring':
summed_grads = all_reduce.build_nccl_then_ring(scaled_grads, num_shards,
math_ops.add)
elif alg == 'nccl/rechd':
summed_grads = all_reduce.build_nccl_then_recursive_hd(
scaled_grads, math_ops.add)
elif alg == 'nccl/pscpu':
summed_grads = all_reduce.build_nccl_then_shuffle(
scaled_grads, aux_devices, math_ops.add, math_ops.add_n)
elif alg == 'pscpu/pscpu':
second_gather_devices = aux_devices[:num_shards]
summed_grads = all_reduce.build_shuffle_then_shuffle(
scaled_grads, aux_devices, second_gather_devices, math_ops.add_n)
elif alg in ['pscpu', 'psgpu']:
summed_grads = all_reduce.build_shuffle_all_reduce(
scaled_grads, aux_devices, math_ops.add_n)
else:
raise ValueError('unsupported all_reduce alg: ', alg)
result = []
for (_, v), g in zip(grad_and_vars, summed_grads):
result.append([g, v])
return result
def sum_gradients_all_reduce(dev_prefixes, replica_grads, num_workers, alg,
num_shards, gpu_indices):
"""Apply all-reduce algorithm over specified gradient tensors.
Args:
dev_prefixes: list of prefix strings to use to generate PS device names.
replica_grads: the gradients to reduce.
num_workers: number of worker processes across entire job.
alg: the all-reduce algorithm to apply.
num_shards: alg-specific sharding factor.
gpu_indices: indices of local GPUs in order usable for ring-reduce.
Returns:
list of lists of (reduced_gradient, variable) pairs, one inner list per replica.
"""
alg_contains_shuffle = any([n in alg for n in ['pscpu', 'psgpu']])
is_hierarchical = '/' in alg
if 'pscpu' in alg:
aux_devices = [prefix + '/cpu:0' for prefix in dev_prefixes]
elif 'psgpu' in alg:
aux_devices = [
prefix + '/gpu:%d' % i
for i in range(len(gpu_indices))
for prefix in dev_prefixes
]
else:
aux_devices = ['/job:localhost/cpu:0']
# Auxiliary devices for hierarchical all-reduces.
aux_device_groups = group_device_names(
aux_devices, num_shards if alg_contains_shuffle else 1)
group_index = 0
reduced_gv_list = []
for grad_and_vars in zip(*replica_grads):
reduced_gv_list.append(
sum_grad_and_var_all_reduce(
grad_and_vars, num_workers, alg, gpu_indices, aux_devices
if is_hierarchical else aux_device_groups[group_index], num_shards))
group_index = (group_index + 1) % len(aux_device_groups)
new_replica_grads = [list(x) for x in zip(*reduced_gv_list)]
return new_replica_grads
def extract_ranges(index_list, range_size_limit=32):
"""Extract consecutive ranges and singles from index_list.
Args:
index_list: List of monotone increasing non-negative integers.
range_size_limit: Largest size range to return. If a larger
consecutive range exists, it will be returned as multiple
ranges.
Returns:
(ranges, singles) where ranges is a list of [first, last] pairs of
consecutive elements in index_list, and singles is all of the
other elements, in original order.
"""
if not index_list:
return [], []
first = index_list[0]
last = first
ranges = []
singles = []
for i in index_list[1:]:
if i == last + 1 and (last - first) <= range_size_limit:
last = i
else:
if last > first:
ranges.append([first, last])
else:
singles.append(first)
first = i
last = i
if last > first:
ranges.append([first, last])
else:
singles.append(first)
return ranges, singles
GradPackTuple = pycoll.namedtuple('GradPackTuple', 'indices vars shapes')
def pack_range(key, packing, grad_vars, rng):
"""Form the concatenation of a specified range of gradient tensors.
Args:
key: Value under which to store meta-data in packing that will be used
later to restore the grad_var list structure.
packing: Dict holding data describing packed ranges of small tensors.
grad_vars: List of (grad, var) pairs for one replica.
rng: A pair of integers giving the first, last indices of a consecutive
range of tensors to be packed.
Returns:
A tensor that is the concatenation of all the specified small tensors.
"""
to_pack = grad_vars[rng[0]:rng[1] + 1]
members = []
variables = []
restore_shapes = []
with ops.name_scope('pack'):
for g, v in to_pack:
variables.append(v)
restore_shapes.append(g.shape)
with ops.device(g.device):
members.append(array_ops.reshape(g, [-1]))
packing[key] = GradPackTuple(
indices=range(rng[0], rng[1] + 1),
vars=variables,
shapes=restore_shapes)
with ops.device(members[0].device):
return array_ops.concat(members, 0)
# MASKED: unpack_grad_tuple function (lines 522-542)
def pack_small_tensors(replica_grads, max_bytes=0, max_group=0):
"""Concatenate small gradient tensors together for reduction.
Args:
replica_grads: List of lists of (gradient, variable) tuples.
max_bytes: Int giving max number of bytes in a tensor that
may be considered small.
max_group: Int giving max number of small tensors that may be
concatenated into one new tensor.
Returns:
new_replica_grads, packing where new_replica_grads is identical to
replica_grads except that all feasible small_tensors have been removed
from their places and concatenated into larger tensors that are
now in the front of the list for each replica, and packing contains
the data necessary to restore the replica_grads structure.
Look through the first replica for gradients of the same type (float),
and small size, that are all sequential. For each such group,
replace by a new tensor that is a flattened concatenation. Note
that the corresponding variable will be absent, which doesn't matter
because it isn't used during all-reduce.
Requires:
Every gv_list in replicas must have isomorphic structure including identical
tensor sizes and types.
"""
small_indices = []
large_indices = []
for idx, (g, _) in enumerate(replica_grads[0]):
if g.dtype == dtypes.float32 and (4 * g.shape.num_elements()) <= max_bytes:
small_indices.append(idx)
else:
large_indices.append(idx)
small_ranges, small_singles = extract_ranges(
small_indices, range_size_limit=max_group)
large_indices = sorted(large_indices + small_singles)
num_gv = len(replica_grads[0])
packing = {}
if small_ranges:
new_replica_grads = []
for dev_idx, gv_list in enumerate(replica_grads):
assert len(gv_list) == num_gv
new_gv_list = []
for r in small_ranges:
key = '%d:%d' % (dev_idx, len(new_gv_list))
new_gv_list.append((pack_range(key, packing, gv_list, r),
'packing_var_placeholder'))
for i in large_indices:
new_gv_list.append(gv_list[i])
new_replica_grads.append(new_gv_list)
return new_replica_grads, packing
else:
return replica_grads, None
def unpack_small_tensors(replica_grads, packing):
"""Undo the structure alterations to replica_grads done by pack_small_tensors.
Args:
replica_grads: List of List of (grad, var) tuples.
packing: A dict generated by pack_small_tensors describing the changes
it made to replica_grads.
Returns:
new_replica_grads: identical to replica_grads except that concatenations
of small tensors have been split apart and returned to their original
positions, paired with their original variables.
"""
if not packing:
return replica_grads
new_replica_grads = []
num_devices = len(replica_grads)
num_packed = len(packing.keys()) // num_devices
for dev_idx, gv_list in enumerate(replica_grads):
gv_list = list(gv_list)
new_gv_list = gv_list[num_packed:]
for i in range(num_packed):
k = '%d:%d' % (dev_idx, i)
gpt = packing[k]
gv = unpack_grad_tuple(gv_list[i], gpt)
for gi, idx in enumerate(gpt.indices):
assert idx == gpt.indices[gi]
new_gv_list.insert(idx, gv[gi])
new_replica_grads.append(new_gv_list)
return new_replica_grads
def aggregate_tensors_or_indexed_slices(values, accumulation_fn=math_ops.add_n):
"""Aggregate tensors using `accumulation_fn` and IndexedSlices via concat."""
if any(isinstance(v, ops.IndexedSlices) for v in values):
return gradients_impl._AggregateIndexedSlicesGradients(values) # pylint: disable=protected-access
else:
return accumulation_fn(values)
def divide_by_n_tensors_or_indexed_slices(value, n):
if isinstance(value, ops.IndexedSlices):
value = gradients_impl._HandleNestedIndexedSlices(value) # pylint: disable=protected-access
return ops.IndexedSlices(
value.values / n, value.indices, value.dense_shape)
else:
return value / n
def copy_tensor_or_indexed_slices_to_device(value, device):
with ops.device(device):
if isinstance(value, ops.IndexedSlices):
copied_values = array_ops.identity(value.values)
copied_indices = array_ops.identity(value.indices)
copied_shape = array_ops.identity(value.dense_shape)
result = ops.IndexedSlices(copied_values, copied_indices, copied_shape)
else:
result = array_ops.identity(value)
return result
def contains_indexed_slices(value):
"""Check whether the value is `IndexedSlices` or contains `IndexedSlices`."""
if isinstance(value, ops.IndexedSlices):
return True
elif isinstance(value, (list, tuple)) and value:
return any(contains_indexed_slices(v) for v in value)
elif isinstance(value, value_lib.DistributedValues):
return contains_indexed_slices(list(value._index.values())) # pylint: disable=protected-access
else:
return False
|
def unpack_grad_tuple(gv, gpt):
"""Unpack a previously packed collection of gradient tensors.
Args:
gv: A (grad, var) pair to be unpacked.
gpt: A GradPackTuple describing the packing operation that produced gv.
Returns:
A list of (grad, var) pairs corresponding to the values that were
originally packed into gv, maybe following subsequent operations like
reduction.
"""
elt_widths = [x.num_elements() for x in gpt.shapes]
with ops.device(gv[0][0].device):
with ops.name_scope('unpack'):
splits = array_ops.split(gv[0], elt_widths)
unpacked_gv = []
for idx, s in enumerate(splits):
unpacked_gv.append((array_ops.reshape(s, gpt.shapes[idx]),
gpt.vars[idx]))
return unpacked_gv
| 522 | 542 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for cross_device_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as pycoll
import threading
from tensorflow.python.distribute import all_reduce
from tensorflow.python.distribute import values as value_lib
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nccl_ops
def aggregate_gradients_using_nccl(replica_grads):
"""Aggregate gradients using nccl allreduce."""
agg_all_g_and_v = []
for single_g_and_v in zip(*replica_grads):
single_grads = [g for g, _ in single_g_and_v]
agg_grads = nccl_ops.all_sum(single_grads)
agg_all_g_and_v.append(
[(g, v) for g, (_, v) in zip(agg_grads, single_g_and_v)])
agg_all_g_and_v = list(zip(*agg_all_g_and_v))
return agg_all_g_and_v
def aggregate_gradients_using_hierarchical_copy(avail_devices, replica_grads):
"""Aggregate gradients using hierarchical copies.
Args:
avail_devices: available GPU devices.
replica_grads: List of lists of (gradient, variable) tuples. The outer list
is over replicas. The inner list is over individual gradients.
Returns:
The list of (aggregated_gradient, variable), where the gradient has been
summed across all replicas and the variable is chosen from the first
replica.
"""
# This only works for DGX-1 type of machine topology
# Device peer to peer matrix
# DMA: 0 1 2 3 4 5 6 7
# 0: Y Y Y Y Y N N N
# 1: Y Y Y Y N Y N N
# 2: Y Y Y Y N N Y N
# 3: Y Y Y Y N N N Y
# 4: Y N N N Y Y Y Y
# 5: N Y N N Y Y Y Y
# 6: N N Y N Y Y Y Y
# 7: N N N Y Y Y Y Y
agg_grads = []
num_devices = len(avail_devices)
# In the special case of DGX-1 machine topology, the two groups have equal
# size.
group_size = num_devices // 2
for i, single_grads in enumerate(zip(*replica_grads)):
group_0_main_device = i % num_devices
group_1_main_device = (group_0_main_device + group_size) % num_devices
if group_0_main_device < group_size:
group_0_begin = 0
group_1_begin = group_size
else:
group_0_begin = group_size
group_1_begin = 0
# Aggregate the first group.
group_0_device_grads = single_grads[group_0_begin:
group_0_begin + group_size]
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads, _ = aggregate_single_gradient_using_copy(
group_0_device_grads, False, False)
# Aggregate the second group.
group_1_device_grads = single_grads[group_1_begin:
group_1_begin + group_size]
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads, _ = aggregate_single_gradient_using_copy(
group_1_device_grads, False, False)
# Aggregate between the groups.
with ops.device(avail_devices[group_0_main_device]):
(agg_total_grads, _), _ = aggregate_single_gradient_using_copy(
[group_0_agg_grads, group_1_agg_grads], False, False)
# Broadcast the result back into the root of each group.
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads_bcast = array_ops.identity(agg_total_grads)
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads_bcast = array_ops.identity(agg_total_grads)
agg_grads_bcast = []
for j in range(len(single_grads)):
with ops.device(avail_devices[j]):
# Broadcast the result back to each member in the group from the root.
if (group_0_main_device < group_size) == (j < group_size):
src_device_grad = group_0_agg_grads_bcast
else:
src_device_grad = group_1_agg_grads_bcast
agg_grads_bcast.append(array_ops.identity(src_device_grad))
agg_grads.append(
[(g, v) for g, (_, v) in zip(agg_grads_bcast, single_grads)])
agg_grads = list(zip(*agg_grads))
return agg_grads
def aggregate_single_gradient_using_copy(grad_and_vars, use_mean,
check_inf_nan):
"""Calculate the average gradient for a shared variable across all replicas.
Note that this function provides a synchronization point across all replicas.
Args:
grad_and_vars: A list or tuple of (gradient, variable) tuples. Each
(gradient, variable) pair within the outer list represents the gradient
of the variable calculated for a single replica, and the number of pairs
equals the number of replicas.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all replicas. The variable is chosen
from the first replica. The has_nan_or_inf output indicates whether any of the
gradients contains a NaN or an Inf.
"""
grads = [g for g, _ in grad_and_vars]
grad = math_ops.add_n(grads)
if use_mean and len(grads) > 1:
grad = array_ops.multiply(grad, 1.0 / len(grads))
v = grad_and_vars[0][1]
if check_inf_nan:
has_nan_or_inf = array_ops.logical_not(
array_ops.reduce_all(array_ops.is_finite(grads)))
return (grad, v), has_nan_or_inf
else:
return (grad, v), None
def group_device_names(devices, group_size):
"""Group device names into groups of group_size.
Args:
devices: a list of canonical device strings.
group_size: integer which is equal to or greater than 1.
Returns:
list of lists of devices, where each inner list is group_size long,
and each device appears at least once in an inner list. If
len(devices) % group_size == 0 then each device will appear exactly once.
Raises:
ValueError: if group_size > len(devices)
"""
num_devices = len(devices)
if group_size > num_devices:
raise ValueError(
'only %d devices, but group_size=%d' % (num_devices, group_size))
num_groups = (
num_devices // group_size + (1 if (num_devices % group_size != 0) else 0))
groups = [[] for i in range(num_groups)]
for i in range(num_groups * group_size):
groups[i % num_groups].append(devices[i % num_devices])
return groups
def split_grads_by_size(threshold_size, device_grads):
"""Break gradients into two sets according to tensor size.
Args:
threshold_size: int size cutoff for small vs large tensor.
device_grads: List of lists of (gradient, variable) tuples. The outer
list is over devices. The inner list is over individual gradients.
Returns:
small_grads: Subset of device_grads where shape is <= threshold_size
elements.
large_grads: Subset of device_grads where shape is > threshold_size
elements.
"""
small_grads = []
large_grads = []
for dl in device_grads:
small_dl = []
large_dl = []
for (g, v) in dl:
tensor_size = g.get_shape().num_elements()
if tensor_size <= threshold_size:
small_dl.append([g, v])
else:
large_dl.append([g, v])
if small_dl:
small_grads.append(small_dl)
if large_dl:
large_grads.append(large_dl)
return small_grads, large_grads
# threading.Lock() and threading.local() cannot be pickled and therefore cannot
# be a field of CollectiveKeys. Right now _thread_local does not need to be an
# instance member of CollectiveKeys since we always create a new thread for
# each replica.
_lock = threading.Lock()
_thread_local = threading.local()
# TODO(yuefengz): use random key starts to avoid reusing keys?
class CollectiveKeys(object):
"""Class that manages collective keys.
We need to manage three different keys for collective:
*Group key*: an integer key to identify the set of cooperative devices.
Collective ops that run on the same set of devices must use the same group
key.
*Instance key*: an integer key to identify the set of counterpart tensors on
different devices in a device group that need to be all-reduced together.
*Graph key*: an integer key that is unique per graph. This is used to support
multiple graphs per client session. It must be non-zero and set in the
`config` argument of each call to `session.run`.
"""
def __init__(self,
group_key_start=1,
instance_key_start=100,
instance_key_with_id_start=10000):
"""Initializes the object.
Args:
group_key_start: the starting integer of group key.
instance_key_start: the starting integer of instance key.
instance_key_with_id_start: the starting integer of instance key that is
recorded with an id.
"""
self._group_key = group_key_start
self._group_key_table = dict()
# For instance keys with ids
self._instance_key_id_to_key_table = dict()
self._instance_key_with_id_counter = instance_key_with_id_start
# For instance keys without ids
self._instance_key_start = instance_key_start
def _get_thread_local_object(self):
# We make instance key without key ids thread local so that it will work
# with MirroredStrategy and distribute coordinator.
if not hasattr(_thread_local, 'instance_key'):
_thread_local.instance_key = self._instance_key_start
return _thread_local
def get_group_key(self, devices):
"""Returns a group key for the set of devices.
Args:
devices: list of strings naming devices in a collective group.
Returns:
int key uniquely identifying the set of device names.
"""
parsed = [pydev.DeviceSpec.from_string(d) for d in devices]
# In the between-graph replicated training, different workers need to get
# the same device key. So we remove the task_type and task_id from the
# devices.
# TODO(yuefengz): in the in-graph replicated training, we need to include
# task_type and task_id.
names = sorted(['%s:%d' % (d.device_type, d.device_index) for d in parsed])
key_id = ','.join(names)
with _lock:
if key_id not in self._group_key_table:
new_key = self._group_key
self._group_key += 1
self._group_key_table[key_id] = new_key
return self._group_key_table[key_id]
def get_instance_key(self, key_id=None):
"""Returns a new instance key for use in defining a collective op.
Args:
key_id: optional string. If set, key will be recorded and the same key
will be returned when the same key_id is provided. If not, an increasing
instance key will be returned.
"""
if key_id:
with _lock:
if key_id not in self._instance_key_id_to_key_table:
self._instance_key_with_id_counter += 1
self._instance_key_id_to_key_table[key_id] = (
self._instance_key_with_id_counter)
return self._instance_key_id_to_key_table[key_id]
else:
v = self._get_thread_local_object().instance_key
self._get_thread_local_object().instance_key += 1
return v
def build_collective_reduce(input_tensors,
num_workers,
collective_keys,
reduction_op='Add',
unary_op='Id'):
"""Build a subgraph that does one full all-reduce, using the collective Op.
Args:
input_tensors: tensors within a single worker graph that are to be reduced
together; must be one per device.
num_workers: total number of workers with identical independent graphs that
will be doing this same reduction. The reduction will actually include
the corresponding tensors at all these workers.
collective_keys: a CollectiveKeys object.
reduction_op: string naming the reduction op.
unary_op: string naming the unary final op.
Returns:
An array of final tensors, one per device, computed by the full reduction.
Raises:
ValueError: There must be at least two tensors over all the workers.
"""
group_size = len(input_tensors) * num_workers
if group_size < 2:
raise ValueError('num_workers * len(input_tensors) must be 2 or greater')
devices = [t.device for t in input_tensors]
num_devices = len(devices)
group_key = collective_keys.get_group_key(devices)
instance_key = collective_keys.get_instance_key()
out_tensors = []
subdiv_offsets = [0] # TODO(tucker): maybe support non-default subdiv spec
for d in range(num_devices):
with ops.device(devices[d]):
reduce_op = collective_ops.all_reduce(
input_tensors[d], group_size, group_key, instance_key, reduction_op,
unary_op, subdiv_offsets)
out_tensors.append(reduce_op)
return out_tensors
def sum_grad_and_var_all_reduce(grad_and_vars,
num_workers,
alg,
gpu_indices,
aux_devices=None,
num_shards=1):
"""Apply all-reduce algorithm over specified gradient tensors."""
with ops.name_scope('allreduce'):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
scaled_grads = [g for g, _ in grad_and_vars]
if alg == 'nccl':
summed_grads = nccl_ops.all_sum(scaled_grads)
elif alg == 'xring':
summed_grads = all_reduce.build_ring_all_reduce(
scaled_grads, num_workers, num_shards, gpu_indices, math_ops.add)
elif alg == 'nccl/xring':
summed_grads = all_reduce.build_nccl_then_ring(scaled_grads, num_shards,
math_ops.add)
elif alg == 'nccl/rechd':
summed_grads = all_reduce.build_nccl_then_recursive_hd(
scaled_grads, math_ops.add)
elif alg == 'nccl/pscpu':
summed_grads = all_reduce.build_nccl_then_shuffle(
scaled_grads, aux_devices, math_ops.add, math_ops.add_n)
elif alg == 'pscpu/pscpu':
second_gather_devices = aux_devices[:num_shards]
summed_grads = all_reduce.build_shuffle_then_shuffle(
scaled_grads, aux_devices, second_gather_devices, math_ops.add_n)
elif alg in ['pscpu', 'psgpu']:
summed_grads = all_reduce.build_shuffle_all_reduce(
scaled_grads, aux_devices, math_ops.add_n)
else:
raise ValueError('unsupported all_reduce alg: ', alg)
result = []
for (_, v), g in zip(grad_and_vars, summed_grads):
result.append([g, v])
return result
def sum_gradients_all_reduce(dev_prefixes, replica_grads, num_workers, alg,
num_shards, gpu_indices):
"""Apply all-reduce algorithm over specified gradient tensors.
Args:
dev_prefixes: list of prefix strings to use to generate PS device names.
replica_grads: the gradients to reduce.
num_workers: number of worker processes across entire job.
alg: the all-reduce algorithm to apply.
num_shards: alg-specific sharding factor.
gpu_indices: indices of local GPUs in order usable for ring-reduce.
Returns:
list of lists of (reduced_gradient, variable) pairs, one inner list per replica.
"""
alg_contains_shuffle = any([n in alg for n in ['pscpu', 'psgpu']])
is_hierarchical = '/' in alg
if 'pscpu' in alg:
aux_devices = [prefix + '/cpu:0' for prefix in dev_prefixes]
elif 'psgpu' in alg:
aux_devices = [
prefix + '/gpu:%d' % i
for i in range(len(gpu_indices))
for prefix in dev_prefixes
]
else:
aux_devices = ['/job:localhost/cpu:0']
# Auxiliary devices for hierarchical all-reduces.
aux_device_groups = group_device_names(
aux_devices, num_shards if alg_contains_shuffle else 1)
group_index = 0
reduced_gv_list = []
for grad_and_vars in zip(*replica_grads):
reduced_gv_list.append(
sum_grad_and_var_all_reduce(
grad_and_vars, num_workers, alg, gpu_indices, aux_devices
if is_hierarchical else aux_device_groups[group_index], num_shards))
group_index = (group_index + 1) % len(aux_device_groups)
new_replica_grads = [list(x) for x in zip(*reduced_gv_list)]
return new_replica_grads
def extract_ranges(index_list, range_size_limit=32):
"""Extract consecutive ranges and singles from index_list.
Args:
index_list: List of monotone increasing non-negative integers.
range_size_limit: Largest size range to return. If a larger
consecutive range exists, it will be returned as multiple
ranges.
Returns:
(ranges, singles) where ranges is a list of [first, last] pairs of
consecutive elements in index_list, and singles is all of the
other elements, in original order.
"""
if not index_list:
return [], []
first = index_list[0]
last = first
ranges = []
singles = []
for i in index_list[1:]:
if i == last + 1 and (last - first) <= range_size_limit:
last = i
else:
if last > first:
ranges.append([first, last])
else:
singles.append(first)
first = i
last = i
if last > first:
ranges.append([first, last])
else:
singles.append(first)
return ranges, singles
GradPackTuple = pycoll.namedtuple('GradPackTuple', 'indices vars shapes')
def pack_range(key, packing, grad_vars, rng):
"""Form the concatenation of a specified range of gradient tensors.
Args:
key: Value under which to store meta-data in packing that will be used
later to restore the grad_var list structure.
packing: Dict holding data describing packed ranges of small tensors.
grad_vars: List of (grad, var) pairs for one replica.
rng: A pair of integers giving the first, last indices of a consecutive
range of tensors to be packed.
Returns:
A tensor that is the concatenation of all the specified small tensors.
"""
to_pack = grad_vars[rng[0]:rng[1] + 1]
members = []
variables = []
restore_shapes = []
with ops.name_scope('pack'):
for g, v in to_pack:
variables.append(v)
restore_shapes.append(g.shape)
with ops.device(g.device):
members.append(array_ops.reshape(g, [-1]))
packing[key] = GradPackTuple(
indices=range(rng[0], rng[1] + 1),
vars=variables,
shapes=restore_shapes)
with ops.device(members[0].device):
return array_ops.concat(members, 0)
def unpack_grad_tuple(gv, gpt):
"""Unpack a previously packed collection of gradient tensors.
Args:
gv: A (grad, var) pair to be unpacked.
gpt: A GradPackTuple describing the packing operation that produced gv.
Returns:
A list of (grad, var) pairs corresponding to the values that were
originally packed into gv, maybe following subsequent operations like
reduction.
"""
elt_widths = [x.num_elements() for x in gpt.shapes]
with ops.device(gv[0][0].device):
with ops.name_scope('unpack'):
splits = array_ops.split(gv[0], elt_widths)
unpacked_gv = []
for idx, s in enumerate(splits):
unpacked_gv.append((array_ops.reshape(s, gpt.shapes[idx]),
gpt.vars[idx]))
return unpacked_gv
def pack_small_tensors(replica_grads, max_bytes=0, max_group=0):
"""Concatenate small gradient tensors together for reduction.
Args:
replica_grads: List of lists of (gradient, variable) tuples.
max_bytes: Int giving max number of bytes in a tensor that
may be considered small.
max_group: Int giving max number of small tensors that may be
concatenated into one new tensor.
Returns:
new_replica_grads, packing where new_replica_grads is identical to
replica_grads except that all feasible small_tensors have been removed
from their places and concatenated into larger tensors that are
now in the front of the list for each replica, and packing contains
the data necessary to restore the replica_grads structure.
Look through the first replica for gradients of the same type (float),
and small size, that are all sequential. For each such group,
replace by a new tensor that is a flattened concatenation. Note
that the corresponding variable will be absent, which doesn't matter
because it isn't used during all-reduce.
Requires:
Every gv_list in replicas must have isomorphic structure including identical
tensor sizes and types.
"""
small_indices = []
large_indices = []
for idx, (g, _) in enumerate(replica_grads[0]):
if g.dtype == dtypes.float32 and (4 * g.shape.num_elements()) <= max_bytes:
small_indices.append(idx)
else:
large_indices.append(idx)
small_ranges, small_singles = extract_ranges(
small_indices, range_size_limit=max_group)
large_indices = sorted(large_indices + small_singles)
num_gv = len(replica_grads[0])
packing = {}
if small_ranges:
new_replica_grads = []
for dev_idx, gv_list in enumerate(replica_grads):
assert len(gv_list) == num_gv
new_gv_list = []
for r in small_ranges:
key = '%d:%d' % (dev_idx, len(new_gv_list))
new_gv_list.append((pack_range(key, packing, gv_list, r),
'packing_var_placeholder'))
for i in large_indices:
new_gv_list.append(gv_list[i])
new_replica_grads.append(new_gv_list)
return new_replica_grads, packing
else:
return replica_grads, None
def unpack_small_tensors(replica_grads, packing):
"""Undo the structure alterations to replica_grads done by pack_small_tensors.
Args:
replica_grads: List of List of (grad, var) tuples.
packing: A dict generated by pack_small_tensors describing the changes
it made to replica_grads.
Returns:
new_replica_grads: identical to replica_grads except that concatenations
of small tensors have been split apart and returned to their original
positions, paired with their original variables.
"""
if not packing:
return replica_grads
new_replica_grads = []
num_devices = len(replica_grads)
num_packed = len(packing.keys()) // num_devices
for dev_idx, gv_list in enumerate(replica_grads):
gv_list = list(gv_list)
new_gv_list = gv_list[num_packed:]
for i in range(num_packed):
k = '%d:%d' % (dev_idx, i)
gpt = packing[k]
gv = unpack_grad_tuple(gv_list[i], gpt)
for gi, idx in enumerate(gpt.indices):
assert idx == gpt.indices[gi]
new_gv_list.insert(idx, gv[gi])
new_replica_grads.append(new_gv_list)
return new_replica_grads
def aggregate_tensors_or_indexed_slices(values, accumulation_fn=math_ops.add_n):
"""Aggregate tensors using `accumulation_fn` and IndexedSlices via concat."""
if any(isinstance(v, ops.IndexedSlices) for v in values):
return gradients_impl._AggregateIndexedSlicesGradients(values) # pylint: disable=protected-access
else:
return accumulation_fn(values)
def divide_by_n_tensors_or_indexed_slices(value, n):
if isinstance(value, ops.IndexedSlices):
value = gradients_impl._HandleNestedIndexedSlices(value) # pylint: disable=protected-access
return ops.IndexedSlices(
value.values / n, value.indices, value.dense_shape)
else:
return value / n
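# Hypothetical sparse-path sketch: dividing an IndexedSlices by n scales only
# its values; indices and dense_shape are preserved. dense_shape is supplied
# explicitly for this illustration.
def _divide_indexed_slices_example():
  slices = ops.IndexedSlices(
      values=ops.convert_to_tensor([[2.0, 4.0]]),
      indices=ops.convert_to_tensor([1]),
      dense_shape=ops.convert_to_tensor([4, 2]))
  halved = divide_by_n_tensors_or_indexed_slices(slices, 2)
  # halved.values == [[1.0, 2.0]]; halved.indices and halved.dense_shape match
  # the input.
  return halved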
def copy_tensor_or_indexed_slices_to_device(value, device):
with ops.device(device):
if isinstance(value, ops.IndexedSlices):
copied_values = array_ops.identity(value.values)
copied_indices = array_ops.identity(value.indices)
copied_shape = array_ops.identity(value.dense_shape)
result = ops.IndexedSlices(copied_values, copied_indices, copied_shape)
else:
result = array_ops.identity(value)
return result
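# Hypothetical sketch: copying an IndexedSlices to a device copies its values,
# indices and dense_shape individually, so the sketch supplies a dense_shape
# explicitly (the function reads it unconditionally).
def _copy_indexed_slices_example():
  slices = ops.IndexedSlices(
      values=array_ops.ones([1, 4]),
      indices=ops.convert_to_tensor([2]),
      dense_shape=ops.convert_to_tensor([8, 4]))
  copied = copy_tensor_or_indexed_slices_to_device(slices, '/device:CPU:0')
  # copied is an ops.IndexedSlices whose component tensors live on CPU:0.
  return copied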
def contains_indexed_slices(value):
"""Check whether the value is `IndexedSlices` or contains `IndexedSlices`."""
if isinstance(value, ops.IndexedSlices):
return True
elif isinstance(value, (list, tuple)) and value:
return any(contains_indexed_slices(v) for v in value)
elif isinstance(value, value_lib.DistributedValues):
return contains_indexed_slices(list(value._index.values())) # pylint: disable=protected-access
else:
return False
|
unpack_small_tensors
|
Undo the structure alterations to replica_grads done by pack_small_tensors.
Args:
replica_grads: List of List of (grad, var) tuples.
packing: A dict generated by pack_small_tensors describing the changes
it made to replica_grads.
Returns:
new_replica_grads: identical to replica_grads except that concatenations
of small tensors have been split apart and returned to their original
positions, paired with their original variables.
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for cross_device_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as pycoll
import threading
from tensorflow.python.distribute import all_reduce
from tensorflow.python.distribute import values as value_lib
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nccl_ops
def aggregate_gradients_using_nccl(replica_grads):
"""Aggregate gradients using nccl allreduce."""
agg_all_g_and_v = []
for single_g_and_v in zip(*replica_grads):
single_grads = [g for g, _ in single_g_and_v]
agg_grads = nccl_ops.all_sum(single_grads)
agg_all_g_and_v.append(
[(g, v) for g, (_, v) in zip(agg_grads, single_g_and_v)])
agg_all_g_and_v = list(zip(*agg_all_g_and_v))
return agg_all_g_and_v
def aggregate_gradients_using_hierarchical_copy(avail_devices, replica_grads):
"""Aggregate gradients using hierarchical copies.
Args:
avail_devices: available GPU devices.
replica_grads: List of lists of (gradient, variable) tuples. The outer list
is over replicas. The inner list is over individual gradients.
Returns:
The list of (aggregated_gradient, variable), where the gradient has been
summed across all replicas and the variable is chosen from the first
replica.
"""
# This only works for DGX-1 type of machine topology
# Device peer to peer matrix
# DMA: 0 1 2 3 4 5 6 7
# 0: Y Y Y Y Y N N N
# 1: Y Y Y Y N Y N N
# 2: Y Y Y Y N N Y N
# 3: Y Y Y Y N N N Y
# 4: Y N N N Y Y Y Y
# 5: N Y N N Y Y Y Y
# 6: N N Y N Y Y Y Y
# 7: N N N Y Y Y Y Y
agg_grads = []
num_devices = len(avail_devices)
# In the special case of DGX-1 machine topology, the two groups have equal
# size.
group_size = num_devices // 2
for i, single_grads in enumerate(zip(*replica_grads)):
group_0_main_device = i % num_devices
group_1_main_device = (group_0_main_device + group_size) % num_devices
if group_0_main_device < group_size:
group_0_begin = 0
group_1_begin = group_size
else:
group_0_begin = group_size
group_1_begin = 0
# Aggregate the first group.
group_0_device_grads = single_grads[group_0_begin:
group_0_begin + group_size]
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads, _ = aggregate_single_gradient_using_copy(
group_0_device_grads, False, False)
# Aggregate the second group.
group_1_device_grads = single_grads[group_1_begin:
group_1_begin + group_size]
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads, _ = aggregate_single_gradient_using_copy(
group_1_device_grads, False, False)
# Aggregate between the groups.
with ops.device(avail_devices[group_0_main_device]):
(agg_total_grads, _), _ = aggregate_single_gradient_using_copy(
[group_0_agg_grads, group_1_agg_grads], False, False)
# Broadcast the result back into the root of each group.
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads_bcast = array_ops.identity(agg_total_grads)
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads_bcast = array_ops.identity(agg_total_grads)
agg_grads_bcast = []
for j in range(len(single_grads)):
with ops.device(avail_devices[j]):
# Broadcast the result back to each member in the group from the root.
if (group_0_main_device < group_size) == (j < group_size):
src_device_grad = group_0_agg_grads_bcast
else:
src_device_grad = group_1_agg_grads_bcast
agg_grads_bcast.append(array_ops.identity(src_device_grad))
agg_grads.append(
[(g, v) for g, (_, v) in zip(agg_grads_bcast, single_grads)])
agg_grads = list(zip(*agg_grads))
return agg_grads
def aggregate_single_gradient_using_copy(grad_and_vars, use_mean,
check_inf_nan):
"""Calculate the average gradient for a shared variable across all replicas.
Note that this function provides a synchronization point across all replicas.
Args:
grad_and_vars: A list or tuple of (gradient, variable) tuples. Each
(gradient, variable) pair within the outer list represents the gradient
of the variable calculated for a single replica, and the number of pairs
equals the number of replicas.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all replicas. The variable is chosen
from the first replica. The has_nan_or_inf output indicates whether any of the
gradients contains a NaN or an Inf.
"""
grads = [g for g, _ in grad_and_vars]
grad = math_ops.add_n(grads)
if use_mean and len(grads) > 1:
grad = array_ops.multiply(grad, 1.0 / len(grads))
v = grad_and_vars[0][1]
if check_inf_nan:
    has_nan_or_inf = math_ops.logical_not(
        math_ops.reduce_all(math_ops.is_finite(grads)))
return (grad, v), has_nan_or_inf
else:
return (grad, v), None
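# Usage sketch added for illustration; it is not part of the original module.
# It shows the expected `grad_and_vars` layout for one shared variable
# replicated over two replicas; the constant gradients and the variable name
# are assumptions made up for the example.
def _example_aggregate_single_gradient_using_copy():
  from tensorflow.python.framework import constant_op
  from tensorflow.python.ops import variables
  v = variables.Variable([0.0, 0.0], name='example_var')
  grad_and_vars = [(constant_op.constant([1.0, 2.0]), v),
                   (constant_op.constant([3.0, 4.0]), v)]
  # With use_mean=False the aggregated gradient is the element-wise sum
  # [4.0, 6.0]; the returned variable is the one from the first replica.
  (summed_grad, var), _ = aggregate_single_gradient_using_copy(
      grad_and_vars, use_mean=False, check_inf_nan=False)
  return summed_grad, var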
def group_device_names(devices, group_size):
"""Group device names into groups of group_size.
Args:
devices: a list of canonical device strings.
group_size: integer which is equal to or greater than 1.
Returns:
list of lists of devices, where each inner list is group_size long,
and each device appears at least once in an inner list. If
len(devices) % group_size == 0 then each device will appear exactly once.
Raises:
ValueError: if group_size > len(devices)
"""
num_devices = len(devices)
if group_size > num_devices:
raise ValueError(
'only %d devices, but group_size=%d' % (num_devices, group_size))
num_groups = (
num_devices // group_size + (1 if (num_devices % group_size != 0) else 0))
groups = [[] for i in range(num_groups)]
for i in range(num_groups * group_size):
groups[i % num_groups].append(devices[i % num_devices])
return groups
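# Minimal sketch, added for illustration only; the device strings below are
# hypothetical and not tied to any real machine.
def _example_group_device_names():
  # Three devices grouped in pairs: the group count rounds up, so '/gpu:0'
  # wraps around and appears in two groups.
  groups = group_device_names(['/gpu:0', '/gpu:1', '/gpu:2'], 2)
  # groups == [['/gpu:0', '/gpu:2'], ['/gpu:1', '/gpu:0']]
  return groups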
def split_grads_by_size(threshold_size, device_grads):
"""Break gradients into two sets according to tensor size.
Args:
threshold_size: int size cutoff for small vs large tensor.
device_grads: List of lists of (gradient, variable) tuples. The outer
list is over devices. The inner list is over individual gradients.
Returns:
small_grads: Subset of device_grads where shape is <= threshold_size
elements.
large_grads: Subset of device_grads where shape is > threshold_size
elements.
"""
small_grads = []
large_grads = []
for dl in device_grads:
small_dl = []
large_dl = []
for (g, v) in dl:
tensor_size = g.get_shape().num_elements()
if tensor_size <= threshold_size:
small_dl.append([g, v])
else:
large_dl.append([g, v])
if small_dl:
small_grads.append(small_dl)
if large_dl:
large_grads.append(large_dl)
return small_grads, large_grads
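# Usage sketch added for illustration (not part of the original module): one
# device whose gradient list mixes a 2-element and a 6-element tensor, split
# with a threshold of 4 elements. The tensors and variable labels are made up.
def _example_split_grads_by_size():
  from tensorflow.python.framework import constant_op
  small = constant_op.constant([1.0, 2.0])             # 2 elements
  large = constant_op.constant([[1.0, 2.0, 3.0]] * 2)  # 6 elements
  device_grads = [[(small, 'var_a'), (large, 'var_b')]]
  small_grads, large_grads = split_grads_by_size(4, device_grads)
  # small_grads keeps only (small, 'var_a'); large_grads only (large, 'var_b').
  return small_grads, large_grads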
# threading.Lock() and threading.local() cannot be pickled and therefore cannot
# be a field of CollectiveKeys. Right now _thread_local does not need to be
# an instance member of CollectiveKeys since we always create a new thread for
# each replica.
_lock = threading.Lock()
_thread_local = threading.local()
# TODO(yuefengz): use random key starts to avoid reusing keys?
class CollectiveKeys(object):
"""Class that manages collective keys.
  We need to manage three different keys for collective ops:
  *Group key*: an integer key to identify the set of cooperative devices.
  Collective ops that work under the same set of devices must use the same
  group key.
  *Instance key*: an integer key to identify the set of corresponding tensors
  on different devices in a device group that need to be all-reduced.
  *Graph key*: an integer key that is unique per graph. This is used to support
  multiple graphs per client session. It must be non-zero and set in the
  `config` argument of each call to `session.run`.
"""
def __init__(self,
group_key_start=1,
instance_key_start=100,
instance_key_with_id_start=10000):
"""Initializes the object.
Args:
group_key_start: the starting integer of group key.
instance_key_start: the starting integer of instance key.
instance_key_with_id_start: the starting integer of instance key that is
recorded with an id.
"""
self._group_key = group_key_start
self._group_key_table = dict()
# For instance keys with ids
self._instance_key_id_to_key_table = dict()
self._instance_key_with_id_counter = instance_key_with_id_start
# For instance keys without ids
self._instance_key_start = instance_key_start
def _get_thread_local_object(self):
# We make instance key without key ids thread local so that it will work
# with MirroredStrategy and distribute coordinator.
if not hasattr(_thread_local, 'instance_key'):
_thread_local.instance_key = self._instance_key_start
return _thread_local
def get_group_key(self, devices):
"""Returns a group key for the set of devices.
Args:
devices: list of strings naming devices in a collective group.
Returns:
int key uniquely identifying the set of device names.
"""
parsed = [pydev.DeviceSpec.from_string(d) for d in devices]
# In the between-graph replicated training, different workers need to get
# the same device key. So we remove the task_type and task_id from the
# devices.
# TODO(yuefengz): in the in-graph replicated training, we need to include
# task_type and task_id.
names = sorted(['%s:%d' % (d.device_type, d.device_index) for d in parsed])
key_id = ','.join(names)
with _lock:
if key_id not in self._group_key_table:
new_key = self._group_key
self._group_key += 1
self._group_key_table[key_id] = new_key
return self._group_key_table[key_id]
def get_instance_key(self, key_id=None):
"""Returns a new instance key for use in defining a collective op.
Args:
key_id: optional string. If set, key will be recorded and the same key
will be returned when the same key_id is provided. If not, an increasing
instance key will be returned.
"""
if key_id:
with _lock:
if key_id not in self._instance_key_id_to_key_table:
self._instance_key_with_id_counter += 1
self._instance_key_id_to_key_table[key_id] = (
self._instance_key_with_id_counter)
return self._instance_key_id_to_key_table[key_id]
else:
v = self._get_thread_local_object().instance_key
self._get_thread_local_object().instance_key += 1
return v
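# Usage sketch for CollectiveKeys, added for illustration; the device strings
# are hypothetical two-GPU names on a single worker.
def _example_collective_keys():
  keys = CollectiveKeys()
  group_key = keys.get_group_key(
      ['/job:worker/replica:0/task:0/device:GPU:0',
       '/job:worker/replica:0/task:0/device:GPU:1'])
  # Keyed instance keys are stable: the same key_id maps to the same integer.
  keyed = keys.get_instance_key(key_id='layer0/kernel')
  assert keyed == keys.get_instance_key(key_id='layer0/kernel')
  # Unkeyed instance keys simply increase per call (per thread).
  unkeyed = keys.get_instance_key()
  return group_key, keyed, unkeyed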
def build_collective_reduce(input_tensors,
num_workers,
collective_keys,
reduction_op='Add',
unary_op='Id'):
"""Build a subgraph that does one full all-reduce, using the collective Op.
Args:
input_tensors: tensors within a single worker graph that are to be reduced
together; must be one per device.
num_workers: total number of workers with identical independent graphs that
will be doing this same reduction. The reduction will actually include
the corresponding tensors at all these workers.
collective_keys: a CollectiveKeys object.
reduction_op: string naming the reduction op.
unary_op: string naming the unary final op.
Returns:
An array of final tensors, one per device, computed by the full reduction.
Raises:
ValueError: There must be at least two tensors over all the workers.
"""
group_size = len(input_tensors) * num_workers
if group_size < 2:
raise ValueError('num_workers * len(input_tensors) must be 2 or greater')
devices = [t.device for t in input_tensors]
num_devices = len(devices)
group_key = collective_keys.get_group_key(devices)
instance_key = collective_keys.get_instance_key()
out_tensors = []
subdiv_offsets = [0] # TODO(tucker): maybe support non-default subdiv spec
for d in range(num_devices):
with ops.device(devices[d]):
reduce_op = collective_ops.all_reduce(
input_tensors[d], group_size, group_key, instance_key, reduction_op,
unary_op, subdiv_offsets)
out_tensors.append(reduce_op)
return out_tensors
def sum_grad_and_var_all_reduce(grad_and_vars,
num_workers,
alg,
gpu_indices,
aux_devices=None,
num_shards=1):
"""Apply all-reduce algorithm over specified gradient tensors."""
with ops.name_scope('allreduce'):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
scaled_grads = [g for g, _ in grad_and_vars]
if alg == 'nccl':
summed_grads = nccl_ops.all_sum(scaled_grads)
elif alg == 'xring':
summed_grads = all_reduce.build_ring_all_reduce(
scaled_grads, num_workers, num_shards, gpu_indices, math_ops.add)
elif alg == 'nccl/xring':
summed_grads = all_reduce.build_nccl_then_ring(scaled_grads, num_shards,
math_ops.add)
elif alg == 'nccl/rechd':
summed_grads = all_reduce.build_nccl_then_recursive_hd(
scaled_grads, math_ops.add)
elif alg == 'nccl/pscpu':
summed_grads = all_reduce.build_nccl_then_shuffle(
scaled_grads, aux_devices, math_ops.add, math_ops.add_n)
elif alg == 'pscpu/pscpu':
second_gather_devices = aux_devices[:num_shards]
summed_grads = all_reduce.build_shuffle_then_shuffle(
scaled_grads, aux_devices, second_gather_devices, math_ops.add_n)
elif alg in ['pscpu', 'psgpu']:
summed_grads = all_reduce.build_shuffle_all_reduce(
scaled_grads, aux_devices, math_ops.add_n)
else:
      raise ValueError('unsupported all_reduce alg: %s' % alg)
result = []
for (_, v), g in zip(grad_and_vars, summed_grads):
result.append([g, v])
return result
def sum_gradients_all_reduce(dev_prefixes, replica_grads, num_workers, alg,
num_shards, gpu_indices):
"""Apply all-reduce algorithm over specified gradient tensors.
Args:
dev_prefixes: list of prefix strings to use to generate PS device names.
replica_grads: the gradients to reduce.
num_workers: number of worker processes across entire job.
alg: the all-reduce algorithm to apply.
num_shards: alg-specific sharding factor.
gpu_indices: indices of local GPUs in order usable for ring-reduce.
Returns:
list of reduced tensors
"""
alg_contains_shuffle = any([n in alg for n in ['pscpu', 'psgpu']])
is_hierarchical = '/' in alg
if 'pscpu' in alg:
aux_devices = [prefix + '/cpu:0' for prefix in dev_prefixes]
elif 'psgpu' in alg:
aux_devices = [
prefix + '/gpu:%d' % i
for i in range(len(gpu_indices))
for prefix in dev_prefixes
]
else:
aux_devices = ['/job:localhost/cpu:0']
# Auxiliary devices for hierarchical all-reduces.
aux_device_groups = group_device_names(
aux_devices, num_shards if alg_contains_shuffle else 1)
group_index = 0
reduced_gv_list = []
for grad_and_vars in zip(*replica_grads):
reduced_gv_list.append(
sum_grad_and_var_all_reduce(
grad_and_vars, num_workers, alg, gpu_indices, aux_devices
if is_hierarchical else aux_device_groups[group_index], num_shards))
group_index = (group_index + 1) % len(aux_device_groups)
new_replica_grads = [list(x) for x in zip(*reduced_gv_list)]
return new_replica_grads
def extract_ranges(index_list, range_size_limit=32):
"""Extract consecutive ranges and singles from index_list.
Args:
index_list: List of monotone increasing non-negative integers.
range_size_limit: Largest size range to return. If a larger
consecutive range exists, it will be returned as multiple
ranges.
Returns:
(ranges, singles) where ranges is a list of [first, last] pairs of
consecutive elements in index_list, and singles is all of the
other elements, in original order.
"""
if not index_list:
return [], []
first = index_list[0]
last = first
ranges = []
singles = []
for i in index_list[1:]:
if i == last + 1 and (last - first) <= range_size_limit:
last = i
else:
if last > first:
ranges.append([first, last])
else:
singles.append(first)
first = i
last = i
if last > first:
ranges.append([first, last])
else:
singles.append(first)
return ranges, singles
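# Worked example added for illustration: consecutive runs become [first, last]
# pairs and isolated indices are returned separately, in their original order.
def _example_extract_ranges():
  ranges, singles = extract_ranges([0, 1, 2, 5, 7, 8])
  # ranges == [[0, 2], [7, 8]], singles == [5]
  return ranges, singles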
GradPackTuple = pycoll.namedtuple('GradPackTuple', 'indices vars shapes')
def pack_range(key, packing, grad_vars, rng):
"""Form the concatenation of a specified range of gradient tensors.
Args:
key: Value under which to store meta-data in packing that will be used
later to restore the grad_var list structure.
packing: Dict holding data describing packed ranges of small tensors.
grad_vars: List of (grad, var) pairs for one replica.
rng: A pair of integers giving the first, last indices of a consecutive
range of tensors to be packed.
Returns:
A tensor that is the concatenation of all the specified small tensors.
"""
to_pack = grad_vars[rng[0]:rng[1] + 1]
members = []
variables = []
restore_shapes = []
with ops.name_scope('pack'):
for g, v in to_pack:
variables.append(v)
restore_shapes.append(g.shape)
with ops.device(g.device):
members.append(array_ops.reshape(g, [-1]))
packing[key] = GradPackTuple(
indices=range(rng[0], rng[1] + 1),
vars=variables,
shapes=restore_shapes)
with ops.device(members[0].device):
return array_ops.concat(members, 0)
def unpack_grad_tuple(gv, gpt):
"""Unpack a previously packed collection of gradient tensors.
Args:
gv: A (grad, var) pair to be unpacked.
gpt: A GradPackTuple describing the packing operation that produced gv.
Returns:
A list of (grad, var) pairs corresponding to the values that were
originally packed into gv, maybe following subsequent operations like
reduction.
"""
elt_widths = [x.num_elements() for x in gpt.shapes]
with ops.device(gv[0][0].device):
with ops.name_scope('unpack'):
splits = array_ops.split(gv[0], elt_widths)
unpacked_gv = []
for idx, s in enumerate(splits):
unpacked_gv.append((array_ops.reshape(s, gpt.shapes[idx]),
gpt.vars[idx]))
return unpacked_gv
def pack_small_tensors(replica_grads, max_bytes=0, max_group=0):
"""Concatenate small gradient tensors together for reduction.
Args:
replica_grads: List of lists of (gradient, variable) tuples.
max_bytes: Int giving max number of bytes in a tensor that
may be considered small.
max_group: Int giving max number of small tensors that may be
concatenated into one new tensor.
Returns:
new_replica_grads, packing where new_replica_grads is identical to
replica_grads except that all feasible small_tensors have been removed
from their places and concatenated into larger tensors that are
now in the front of the list for each replica, and packing contains
the data necessary to restore the replica_grads structure.
Look through the first replica for gradients of the same type (float),
and small size, that are all sequential. For each such group,
replace by a new tensor that is a flattened concatenation. Note
that the corresponding variable will be absent, which doesn't matter
because it isn't used during all-reduce.
Requires:
Every gv_list in replicas must have isomorphic structure including identical
tensor sizes and types.
"""
small_indices = []
large_indices = []
for idx, (g, _) in enumerate(replica_grads[0]):
if g.dtype == dtypes.float32 and (4 * g.shape.num_elements()) <= max_bytes:
small_indices.append(idx)
else:
large_indices.append(idx)
small_ranges, small_singles = extract_ranges(
small_indices, range_size_limit=max_group)
large_indices = sorted(large_indices + small_singles)
num_gv = len(replica_grads[0])
packing = {}
if small_ranges:
new_replica_grads = []
for dev_idx, gv_list in enumerate(replica_grads):
assert len(gv_list) == num_gv
new_gv_list = []
for r in small_ranges:
key = '%d:%d' % (dev_idx, len(new_gv_list))
new_gv_list.append((pack_range(key, packing, gv_list, r),
'packing_var_placeholder'))
for i in large_indices:
new_gv_list.append(gv_list[i])
new_replica_grads.append(new_gv_list)
return new_replica_grads, packing
else:
return replica_grads, None
# MASKED: unpack_small_tensors function (lines 601-630)
def aggregate_tensors_or_indexed_slices(values, accumulation_fn=math_ops.add_n):
"""Aggregate tensors using `accumulation_fn` and IndexedSlices via concat."""
if any(isinstance(v, ops.IndexedSlices) for v in values):
return gradients_impl._AggregateIndexedSlicesGradients(values) # pylint: disable=protected-access
else:
return accumulation_fn(values)
def divide_by_n_tensors_or_indexed_slices(value, n):
if isinstance(value, ops.IndexedSlices):
value = gradients_impl._HandleNestedIndexedSlices(value) # pylint: disable=protected-access
return ops.IndexedSlices(
value.values / n, value.indices, value.dense_shape)
else:
return value / n
def copy_tensor_or_indexed_slices_to_device(value, device):
with ops.device(device):
if isinstance(value, ops.IndexedSlices):
copied_values = array_ops.identity(value.values)
copied_indices = array_ops.identity(value.indices)
copied_shape = array_ops.identity(value.dense_shape)
result = ops.IndexedSlices(copied_values, copied_indices, copied_shape)
else:
result = array_ops.identity(value)
return result
def contains_indexed_slices(value):
"""Check whether the value is `IndexedSlices` or contains `IndexedSlices`."""
if isinstance(value, ops.IndexedSlices):
return True
elif isinstance(value, (list, tuple)) and value:
return any(contains_indexed_slices(v) for v in value)
elif isinstance(value, value_lib.DistributedValues):
return contains_indexed_slices(list(value._index.values())) # pylint: disable=protected-access
else:
return False
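# Illustrative sketch (not part of the original module) of the IndexedSlices
# helpers above; the toy values are assumptions made for the example.
def _example_indexed_slices_helpers():
  from tensorflow.python.framework import constant_op
  dense = constant_op.constant([1.0, 2.0])
  sparse = ops.IndexedSlices(
      values=constant_op.constant([[1.0, 2.0]]),
      indices=constant_op.constant([0], dtype=dtypes.int64),
      dense_shape=constant_op.constant([3, 2], dtype=dtypes.int64))
  # A plain tensor does not contain IndexedSlices; a list holding one does.
  assert not contains_indexed_slices(dense)
  assert contains_indexed_slices([dense, sparse])
  # Dense values are combined with the accumulation_fn (add_n by default).
  return aggregate_tensors_or_indexed_slices([dense, dense])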
|
def unpack_small_tensors(replica_grads, packing):
"""Undo the structure alterations to replica_grads done by pack_small_tensors.
Args:
replica_grads: List of List of (grad, var) tuples.
packing: A dict generated by pack_small_tensors describing the changes
it made to replica_grads.
Returns:
new_replica_grads: identical to replica_grads except that concatenations
of small tensors have been split apart and returned to their original
positions, paired with their original variables.
"""
if not packing:
return replica_grads
new_replica_grads = []
num_devices = len(replica_grads)
num_packed = len(packing.keys()) // num_devices
for dev_idx, gv_list in enumerate(replica_grads):
gv_list = list(gv_list)
new_gv_list = gv_list[num_packed:]
for i in range(num_packed):
k = '%d:%d' % (dev_idx, i)
gpt = packing[k]
gv = unpack_grad_tuple(gv_list[i], gpt)
for gi, idx in enumerate(gpt.indices):
assert idx == gpt.indices[gi]
new_gv_list.insert(idx, gv[gi])
new_replica_grads.append(new_gv_list)
return new_replica_grads
| 601 | 630 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for cross_device_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as pycoll
import threading
from tensorflow.python.distribute import all_reduce
from tensorflow.python.distribute import values as value_lib
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nccl_ops
def aggregate_gradients_using_nccl(replica_grads):
"""Aggregate gradients using nccl allreduce."""
agg_all_g_and_v = []
for single_g_and_v in zip(*replica_grads):
single_grads = [g for g, _ in single_g_and_v]
agg_grads = nccl_ops.all_sum(single_grads)
agg_all_g_and_v.append(
[(g, v) for g, (_, v) in zip(agg_grads, single_g_and_v)])
agg_all_g_and_v = list(zip(*agg_all_g_and_v))
return agg_all_g_and_v
def aggregate_gradients_using_hierarchical_copy(avail_devices, replica_grads):
"""Aggregate gradients using hierarchical copies.
Args:
avail_devices: available GPU devices.
replica_grads: List of lists of (gradient, variable) tuples. The outer list
is over replicas. The inner list is over individual gradients.
Returns:
The list of (aggregated_gradient, variable), where the gradient has been
summed across all replicas and the variable is chosen from the first
replica.
"""
# This only works for DGX-1 type of machine topology
# Device peer to peer matrix
# DMA: 0 1 2 3 4 5 6 7
# 0: Y Y Y Y Y N N N
# 1: Y Y Y Y N Y N N
# 2: Y Y Y Y N N Y N
# 3: Y Y Y Y N N N Y
# 4: Y N N N Y Y Y Y
# 5: N Y N N Y Y Y Y
# 6: N N Y N Y Y Y Y
# 7: N N N Y Y Y Y Y
agg_grads = []
num_devices = len(avail_devices)
# In the special case of DGX-1 machine topology, the two groups have equal
# size.
group_size = num_devices // 2
for i, single_grads in enumerate(zip(*replica_grads)):
group_0_main_device = i % num_devices
group_1_main_device = (group_0_main_device + group_size) % num_devices
if group_0_main_device < group_size:
group_0_begin = 0
group_1_begin = group_size
else:
group_0_begin = group_size
group_1_begin = 0
# Aggregate the first group.
group_0_device_grads = single_grads[group_0_begin:
group_0_begin + group_size]
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads, _ = aggregate_single_gradient_using_copy(
group_0_device_grads, False, False)
# Aggregate the second group.
group_1_device_grads = single_grads[group_1_begin:
group_1_begin + group_size]
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads, _ = aggregate_single_gradient_using_copy(
group_1_device_grads, False, False)
# Aggregate between the groups.
with ops.device(avail_devices[group_0_main_device]):
(agg_total_grads, _), _ = aggregate_single_gradient_using_copy(
[group_0_agg_grads, group_1_agg_grads], False, False)
# Broadcast the result back into the root of each group.
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads_bcast = array_ops.identity(agg_total_grads)
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads_bcast = array_ops.identity(agg_total_grads)
agg_grads_bcast = []
for j in range(len(single_grads)):
with ops.device(avail_devices[j]):
# Broadcast the result back to each member in the group from the root.
if (group_0_main_device < group_size) == (j < group_size):
src_device_grad = group_0_agg_grads_bcast
else:
src_device_grad = group_1_agg_grads_bcast
agg_grads_bcast.append(array_ops.identity(src_device_grad))
agg_grads.append(
[(g, v) for g, (_, v) in zip(agg_grads_bcast, single_grads)])
agg_grads = list(zip(*agg_grads))
return agg_grads
def aggregate_single_gradient_using_copy(grad_and_vars, use_mean,
check_inf_nan):
"""Calculate the average gradient for a shared variable across all replicas.
Note that this function provides a synchronization point across all replicas.
Args:
grad_and_vars: A list or tuple of (gradient, variable) tuples. Each
(gradient, variable) pair within the outer list represents the gradient
of the variable calculated for a single replica, and the number of pairs
equals the number of replicas.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all replicas. The variable is chosen
      from the first replica. The has_nan_or_inf flag indicates whether the
      gradients contain any NaN or Inf values.
"""
grads = [g for g, _ in grad_and_vars]
grad = math_ops.add_n(grads)
if use_mean and len(grads) > 1:
grad = array_ops.multiply(grad, 1.0 / len(grads))
v = grad_and_vars[0][1]
if check_inf_nan:
    has_nan_or_inf = math_ops.logical_not(
        math_ops.reduce_all(math_ops.is_finite(grads)))
return (grad, v), has_nan_or_inf
else:
return (grad, v), None
def group_device_names(devices, group_size):
"""Group device names into groups of group_size.
Args:
devices: a list of canonical device strings.
group_size: integer which is equal to or greater than 1.
Returns:
list of lists of devices, where each inner list is group_size long,
and each device appears at least once in an inner list. If
len(devices) % group_size == 0 then each device will appear exactly once.
Raises:
ValueError: if group_size > len(devices)
"""
num_devices = len(devices)
if group_size > num_devices:
raise ValueError(
'only %d devices, but group_size=%d' % (num_devices, group_size))
num_groups = (
num_devices // group_size + (1 if (num_devices % group_size != 0) else 0))
groups = [[] for i in range(num_groups)]
for i in range(num_groups * group_size):
groups[i % num_groups].append(devices[i % num_devices])
return groups
def split_grads_by_size(threshold_size, device_grads):
"""Break gradients into two sets according to tensor size.
Args:
threshold_size: int size cutoff for small vs large tensor.
device_grads: List of lists of (gradient, variable) tuples. The outer
list is over devices. The inner list is over individual gradients.
Returns:
small_grads: Subset of device_grads where shape is <= threshold_size
elements.
large_grads: Subset of device_grads where shape is > threshold_size
elements.
"""
small_grads = []
large_grads = []
for dl in device_grads:
small_dl = []
large_dl = []
for (g, v) in dl:
tensor_size = g.get_shape().num_elements()
if tensor_size <= threshold_size:
small_dl.append([g, v])
else:
large_dl.append([g, v])
if small_dl:
small_grads.append(small_dl)
if large_dl:
large_grads.append(large_dl)
return small_grads, large_grads
# threading.Lock() and threading.local() cannot be pickled and therefore cannot
# be a field of CollectiveKeys. Right now _thread_local does not need to be
# an instance member of CollectiveKeys since we always create a new thread for
# each replica.
_lock = threading.Lock()
_thread_local = threading.local()
# TODO(yuefengz): use random key starts to avoid reusing keys?
class CollectiveKeys(object):
"""Class that manages collective keys.
  We need to manage three different keys for collective ops:
  *Group key*: an integer key to identify the set of cooperative devices.
  Collective ops that work under the same set of devices must use the same
  group key.
  *Instance key*: an integer key to identify the set of corresponding tensors
  on different devices in a device group that need to be all-reduced.
  *Graph key*: an integer key that is unique per graph. This is used to support
  multiple graphs per client session. It must be non-zero and set in the
  `config` argument of each call to `session.run`.
"""
def __init__(self,
group_key_start=1,
instance_key_start=100,
instance_key_with_id_start=10000):
"""Initializes the object.
Args:
group_key_start: the starting integer of group key.
instance_key_start: the starting integer of instance key.
instance_key_with_id_start: the starting integer of instance key that is
recorded with an id.
"""
self._group_key = group_key_start
self._group_key_table = dict()
# For instance keys with ids
self._instance_key_id_to_key_table = dict()
self._instance_key_with_id_counter = instance_key_with_id_start
# For instance keys without ids
self._instance_key_start = instance_key_start
def _get_thread_local_object(self):
# We make instance key without key ids thread local so that it will work
# with MirroredStrategy and distribute coordinator.
if not hasattr(_thread_local, 'instance_key'):
_thread_local.instance_key = self._instance_key_start
return _thread_local
def get_group_key(self, devices):
"""Returns a group key for the set of devices.
Args:
devices: list of strings naming devices in a collective group.
Returns:
int key uniquely identifying the set of device names.
"""
parsed = [pydev.DeviceSpec.from_string(d) for d in devices]
# In the between-graph replicated training, different workers need to get
# the same device key. So we remove the task_type and task_id from the
# devices.
# TODO(yuefengz): in the in-graph replicated training, we need to include
# task_type and task_id.
names = sorted(['%s:%d' % (d.device_type, d.device_index) for d in parsed])
key_id = ','.join(names)
with _lock:
if key_id not in self._group_key_table:
new_key = self._group_key
self._group_key += 1
self._group_key_table[key_id] = new_key
return self._group_key_table[key_id]
def get_instance_key(self, key_id=None):
"""Returns a new instance key for use in defining a collective op.
Args:
key_id: optional string. If set, key will be recorded and the same key
will be returned when the same key_id is provided. If not, an increasing
instance key will be returned.
"""
if key_id:
with _lock:
if key_id not in self._instance_key_id_to_key_table:
self._instance_key_with_id_counter += 1
self._instance_key_id_to_key_table[key_id] = (
self._instance_key_with_id_counter)
return self._instance_key_id_to_key_table[key_id]
else:
v = self._get_thread_local_object().instance_key
self._get_thread_local_object().instance_key += 1
return v
def build_collective_reduce(input_tensors,
num_workers,
collective_keys,
reduction_op='Add',
unary_op='Id'):
"""Build a subgraph that does one full all-reduce, using the collective Op.
Args:
input_tensors: tensors within a single worker graph that are to be reduced
together; must be one per device.
num_workers: total number of workers with identical independent graphs that
will be doing this same reduction. The reduction will actually include
the corresponding tensors at all these workers.
collective_keys: a CollectiveKeys object.
reduction_op: string naming the reduction op.
unary_op: string naming the unary final op.
Returns:
An array of final tensors, one per device, computed by the full reduction.
Raises:
ValueError: There must be at least two tensors over all the workers.
"""
group_size = len(input_tensors) * num_workers
if group_size < 2:
raise ValueError('num_workers * len(input_tensors) must be 2 or greater')
devices = [t.device for t in input_tensors]
num_devices = len(devices)
group_key = collective_keys.get_group_key(devices)
instance_key = collective_keys.get_instance_key()
out_tensors = []
subdiv_offsets = [0] # TODO(tucker): maybe support non-default subdiv spec
for d in range(num_devices):
with ops.device(devices[d]):
reduce_op = collective_ops.all_reduce(
input_tensors[d], group_size, group_key, instance_key, reduction_op,
unary_op, subdiv_offsets)
out_tensors.append(reduce_op)
return out_tensors
def sum_grad_and_var_all_reduce(grad_and_vars,
num_workers,
alg,
gpu_indices,
aux_devices=None,
num_shards=1):
"""Apply all-reduce algorithm over specified gradient tensors."""
with ops.name_scope('allreduce'):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
scaled_grads = [g for g, _ in grad_and_vars]
if alg == 'nccl':
summed_grads = nccl_ops.all_sum(scaled_grads)
elif alg == 'xring':
summed_grads = all_reduce.build_ring_all_reduce(
scaled_grads, num_workers, num_shards, gpu_indices, math_ops.add)
elif alg == 'nccl/xring':
summed_grads = all_reduce.build_nccl_then_ring(scaled_grads, num_shards,
math_ops.add)
elif alg == 'nccl/rechd':
summed_grads = all_reduce.build_nccl_then_recursive_hd(
scaled_grads, math_ops.add)
elif alg == 'nccl/pscpu':
summed_grads = all_reduce.build_nccl_then_shuffle(
scaled_grads, aux_devices, math_ops.add, math_ops.add_n)
elif alg == 'pscpu/pscpu':
second_gather_devices = aux_devices[:num_shards]
summed_grads = all_reduce.build_shuffle_then_shuffle(
scaled_grads, aux_devices, second_gather_devices, math_ops.add_n)
elif alg in ['pscpu', 'psgpu']:
summed_grads = all_reduce.build_shuffle_all_reduce(
scaled_grads, aux_devices, math_ops.add_n)
else:
      raise ValueError('unsupported all_reduce alg: %s' % alg)
result = []
for (_, v), g in zip(grad_and_vars, summed_grads):
result.append([g, v])
return result
def sum_gradients_all_reduce(dev_prefixes, replica_grads, num_workers, alg,
num_shards, gpu_indices):
"""Apply all-reduce algorithm over specified gradient tensors.
Args:
dev_prefixes: list of prefix strings to use to generate PS device names.
replica_grads: the gradients to reduce.
num_workers: number of worker processes across entire job.
alg: the all-reduce algorithm to apply.
num_shards: alg-specific sharding factor.
gpu_indices: indices of local GPUs in order usable for ring-reduce.
Returns:
list of reduced tensors
"""
alg_contains_shuffle = any([n in alg for n in ['pscpu', 'psgpu']])
is_hierarchical = '/' in alg
if 'pscpu' in alg:
aux_devices = [prefix + '/cpu:0' for prefix in dev_prefixes]
elif 'psgpu' in alg:
aux_devices = [
prefix + '/gpu:%d' % i
for i in range(len(gpu_indices))
for prefix in dev_prefixes
]
else:
aux_devices = ['/job:localhost/cpu:0']
# Auxiliary devices for hierarchical all-reduces.
aux_device_groups = group_device_names(
aux_devices, num_shards if alg_contains_shuffle else 1)
group_index = 0
reduced_gv_list = []
for grad_and_vars in zip(*replica_grads):
reduced_gv_list.append(
sum_grad_and_var_all_reduce(
grad_and_vars, num_workers, alg, gpu_indices, aux_devices
if is_hierarchical else aux_device_groups[group_index], num_shards))
group_index = (group_index + 1) % len(aux_device_groups)
new_replica_grads = [list(x) for x in zip(*reduced_gv_list)]
return new_replica_grads
def extract_ranges(index_list, range_size_limit=32):
"""Extract consecutive ranges and singles from index_list.
Args:
index_list: List of monotone increasing non-negative integers.
range_size_limit: Largest size range to return. If a larger
consecutive range exists, it will be returned as multiple
ranges.
Returns:
(ranges, singles) where ranges is a list of [first, last] pairs of
consecutive elements in index_list, and singles is all of the
other elements, in original order.
"""
if not index_list:
return [], []
first = index_list[0]
last = first
ranges = []
singles = []
for i in index_list[1:]:
if i == last + 1 and (last - first) <= range_size_limit:
last = i
else:
if last > first:
ranges.append([first, last])
else:
singles.append(first)
first = i
last = i
if last > first:
ranges.append([first, last])
else:
singles.append(first)
return ranges, singles
GradPackTuple = pycoll.namedtuple('GradPackTuple', 'indices vars shapes')
def pack_range(key, packing, grad_vars, rng):
"""Form the concatenation of a specified range of gradient tensors.
Args:
key: Value under which to store meta-data in packing that will be used
later to restore the grad_var list structure.
packing: Dict holding data describing packed ranges of small tensors.
grad_vars: List of (grad, var) pairs for one replica.
rng: A pair of integers giving the first, last indices of a consecutive
range of tensors to be packed.
Returns:
A tensor that is the concatenation of all the specified small tensors.
"""
to_pack = grad_vars[rng[0]:rng[1] + 1]
members = []
variables = []
restore_shapes = []
with ops.name_scope('pack'):
for g, v in to_pack:
variables.append(v)
restore_shapes.append(g.shape)
with ops.device(g.device):
members.append(array_ops.reshape(g, [-1]))
packing[key] = GradPackTuple(
indices=range(rng[0], rng[1] + 1),
vars=variables,
shapes=restore_shapes)
with ops.device(members[0].device):
return array_ops.concat(members, 0)
def unpack_grad_tuple(gv, gpt):
"""Unpack a previously packed collection of gradient tensors.
Args:
gv: A (grad, var) pair to be unpacked.
gpt: A GradPackTuple describing the packing operation that produced gv.
Returns:
A list of (grad, var) pairs corresponding to the values that were
originally packed into gv, maybe following subsequent operations like
reduction.
"""
elt_widths = [x.num_elements() for x in gpt.shapes]
with ops.device(gv[0][0].device):
with ops.name_scope('unpack'):
splits = array_ops.split(gv[0], elt_widths)
unpacked_gv = []
for idx, s in enumerate(splits):
unpacked_gv.append((array_ops.reshape(s, gpt.shapes[idx]),
gpt.vars[idx]))
return unpacked_gv
def pack_small_tensors(replica_grads, max_bytes=0, max_group=0):
"""Concatenate small gradient tensors together for reduction.
Args:
replica_grads: List of lists of (gradient, variable) tuples.
max_bytes: Int giving max number of bytes in a tensor that
may be considered small.
max_group: Int giving max number of small tensors that may be
concatenated into one new tensor.
Returns:
new_replica_grads, packing where new_replica_grads is identical to
replica_grads except that all feasible small_tensors have been removed
from their places and concatenated into larger tensors that are
now in the front of the list for each replica, and packing contains
the data necessary to restore the replica_grads structure.
Look through the first replica for gradients of the same type (float),
and small size, that are all sequential. For each such group,
replace by a new tensor that is a flattened concatenation. Note
that the corresponding variable will be absent, which doesn't matter
because it isn't used during all-reduce.
Requires:
Every gv_list in replicas must have isomorphic structure including identical
tensor sizes and types.
"""
small_indices = []
large_indices = []
for idx, (g, _) in enumerate(replica_grads[0]):
if g.dtype == dtypes.float32 and (4 * g.shape.num_elements()) <= max_bytes:
small_indices.append(idx)
else:
large_indices.append(idx)
small_ranges, small_singles = extract_ranges(
small_indices, range_size_limit=max_group)
large_indices = sorted(large_indices + small_singles)
num_gv = len(replica_grads[0])
packing = {}
if small_ranges:
new_replica_grads = []
for dev_idx, gv_list in enumerate(replica_grads):
assert len(gv_list) == num_gv
new_gv_list = []
for r in small_ranges:
key = '%d:%d' % (dev_idx, len(new_gv_list))
new_gv_list.append((pack_range(key, packing, gv_list, r),
'packing_var_placeholder'))
for i in large_indices:
new_gv_list.append(gv_list[i])
new_replica_grads.append(new_gv_list)
return new_replica_grads, packing
else:
return replica_grads, None
def unpack_small_tensors(replica_grads, packing):
"""Undo the structure alterations to replica_grads done by pack_small_tensors.
Args:
replica_grads: List of List of (grad, var) tuples.
packing: A dict generated by pack_small_tensors describing the changes
it made to replica_grads.
Returns:
new_replica_grads: identical to replica_grads except that concatenations
of small tensors have been split apart and returned to their original
positions, paired with their original variables.
"""
if not packing:
return replica_grads
new_replica_grads = []
num_devices = len(replica_grads)
num_packed = len(packing.keys()) // num_devices
for dev_idx, gv_list in enumerate(replica_grads):
gv_list = list(gv_list)
new_gv_list = gv_list[num_packed:]
for i in range(num_packed):
k = '%d:%d' % (dev_idx, i)
gpt = packing[k]
gv = unpack_grad_tuple(gv_list[i], gpt)
for gi, idx in enumerate(gpt.indices):
assert idx == gpt.indices[gi]
new_gv_list.insert(idx, gv[gi])
new_replica_grads.append(new_gv_list)
return new_replica_grads
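# Round-trip sketch, illustrative only and not part of the original code: two
# replicas each holding two tiny float32 gradients are packed into a single
# concatenated tensor per replica and then restored. The constants and string
# variable labels are assumptions made for the example.
def _example_pack_unpack_round_trip():
  from tensorflow.python.framework import constant_op
  replica_grads = []
  for _ in range(2):
    g0 = constant_op.constant([1.0, 2.0])
    g1 = constant_op.constant([3.0])
    replica_grads.append([(g0, 'var_a'), (g1, 'var_b')])
  # Both gradients fit under 64 bytes, so each replica ends up with one packed
  # tensor of shape [3] paired with a placeholder variable entry.
  packed, packing = pack_small_tensors(replica_grads, max_bytes=64, max_group=16)
  # unpack_small_tensors splits the packed tensor and restores the original
  # (gradient, variable) ordering for every replica.
  return unpack_small_tensors(packed, packing)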
def aggregate_tensors_or_indexed_slices(values, accumulation_fn=math_ops.add_n):
"""Aggregate tensors using `accumulation_fn` and IndexedSlices via concat."""
if any(isinstance(v, ops.IndexedSlices) for v in values):
return gradients_impl._AggregateIndexedSlicesGradients(values) # pylint: disable=protected-access
else:
return accumulation_fn(values)
def divide_by_n_tensors_or_indexed_slices(value, n):
if isinstance(value, ops.IndexedSlices):
value = gradients_impl._HandleNestedIndexedSlices(value) # pylint: disable=protected-access
return ops.IndexedSlices(
value.values / n, value.indices, value.dense_shape)
else:
return value / n
def copy_tensor_or_indexed_slices_to_device(value, device):
with ops.device(device):
if isinstance(value, ops.IndexedSlices):
copied_values = array_ops.identity(value.values)
copied_indices = array_ops.identity(value.indices)
copied_shape = array_ops.identity(value.dense_shape)
result = ops.IndexedSlices(copied_values, copied_indices, copied_shape)
else:
result = array_ops.identity(value)
return result
def contains_indexed_slices(value):
"""Check whether the value is `IndexedSlices` or contains `IndexedSlices`."""
if isinstance(value, ops.IndexedSlices):
return True
elif isinstance(value, (list, tuple)) and value:
return any(contains_indexed_slices(v) for v in value)
elif isinstance(value, value_lib.DistributedValues):
return contains_indexed_slices(list(value._index.values())) # pylint: disable=protected-access
else:
return False
|
__init__
|
Initializes the object.
Args:
group_key_start: the starting integer of group key.
instance_key_start: the starting integer of instance key.
instance_key_with_id_start: the starting integer of instance key that is
recorded with an id.
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for cross_device_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as pycoll
import threading
from tensorflow.python.distribute import all_reduce
from tensorflow.python.distribute import values as value_lib
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nccl_ops
def aggregate_gradients_using_nccl(replica_grads):
"""Aggregate gradients using nccl allreduce."""
agg_all_g_and_v = []
for single_g_and_v in zip(*replica_grads):
single_grads = [g for g, _ in single_g_and_v]
agg_grads = nccl_ops.all_sum(single_grads)
agg_all_g_and_v.append(
[(g, v) for g, (_, v) in zip(agg_grads, single_g_and_v)])
agg_all_g_and_v = list(zip(*agg_all_g_and_v))
return agg_all_g_and_v
def aggregate_gradients_using_hierarchical_copy(avail_devices, replica_grads):
"""Aggregate gradients using hierarchical copies.
Args:
avail_devices: available GPU devices.
replica_grads: List of lists of (gradient, variable) tuples. The outer list
is over replicas. The inner list is over individual gradients.
Returns:
The list of (aggregated_gradient, variable), where the gradient has been
summed across all replicas and the variable is chosen from the first
replica.
"""
# This only works for DGX-1 type of machine topology
# Device peer to peer matrix
# DMA: 0 1 2 3 4 5 6 7
# 0: Y Y Y Y Y N N N
# 1: Y Y Y Y N Y N N
# 2: Y Y Y Y N N Y N
# 3: Y Y Y Y N N N Y
# 4: Y N N N Y Y Y Y
# 5: N Y N N Y Y Y Y
# 6: N N Y N Y Y Y Y
# 7: N N N Y Y Y Y Y
agg_grads = []
num_devices = len(avail_devices)
# In the special case of DGX-1 machine topology, the two groups have equal
# size.
group_size = num_devices // 2
for i, single_grads in enumerate(zip(*replica_grads)):
group_0_main_device = i % num_devices
group_1_main_device = (group_0_main_device + group_size) % num_devices
if group_0_main_device < group_size:
group_0_begin = 0
group_1_begin = group_size
else:
group_0_begin = group_size
group_1_begin = 0
# Aggregate the first group.
group_0_device_grads = single_grads[group_0_begin:
group_0_begin + group_size]
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads, _ = aggregate_single_gradient_using_copy(
group_0_device_grads, False, False)
# Aggregate the second group.
group_1_device_grads = single_grads[group_1_begin:
group_1_begin + group_size]
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads, _ = aggregate_single_gradient_using_copy(
group_1_device_grads, False, False)
# Aggregate between the groups.
with ops.device(avail_devices[group_0_main_device]):
(agg_total_grads, _), _ = aggregate_single_gradient_using_copy(
[group_0_agg_grads, group_1_agg_grads], False, False)
# Broadcast the result back into the root of each group.
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads_bcast = array_ops.identity(agg_total_grads)
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads_bcast = array_ops.identity(agg_total_grads)
agg_grads_bcast = []
for j in range(len(single_grads)):
with ops.device(avail_devices[j]):
# Broadcast the result back to each member in the group from the root.
if (group_0_main_device < group_size) == (j < group_size):
src_device_grad = group_0_agg_grads_bcast
else:
src_device_grad = group_1_agg_grads_bcast
agg_grads_bcast.append(array_ops.identity(src_device_grad))
agg_grads.append(
[(g, v) for g, (_, v) in zip(agg_grads_bcast, single_grads)])
agg_grads = list(zip(*agg_grads))
return agg_grads
def aggregate_single_gradient_using_copy(grad_and_vars, use_mean,
check_inf_nan):
"""Calculate the average gradient for a shared variable across all replicas.
Note that this function provides a synchronization point across all replicas.
Args:
grad_and_vars: A list or tuple of (gradient, variable) tuples. Each
(gradient, variable) pair within the outer list represents the gradient
of the variable calculated for a single replica, and the number of pairs
equals the number of replicas.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all replicas. The variable is chosen
      from the first replica. The has_nan_or_inf flag indicates whether the
      gradients contain any NaN or Inf values.
"""
grads = [g for g, _ in grad_and_vars]
grad = math_ops.add_n(grads)
if use_mean and len(grads) > 1:
grad = array_ops.multiply(grad, 1.0 / len(grads))
v = grad_and_vars[0][1]
if check_inf_nan:
    has_nan_or_inf = math_ops.logical_not(
        math_ops.reduce_all(math_ops.is_finite(grads)))
return (grad, v), has_nan_or_inf
else:
return (grad, v), None
def group_device_names(devices, group_size):
"""Group device names into groups of group_size.
Args:
devices: a list of canonical device strings.
group_size: integer which is equal to or greater than 1.
Returns:
list of lists of devices, where each inner list is group_size long,
and each device appears at least once in an inner list. If
len(devices) % group_size == 0 then each device will appear exactly once.
Raises:
ValueError: if group_size > len(devices)
"""
num_devices = len(devices)
if group_size > num_devices:
raise ValueError(
'only %d devices, but group_size=%d' % (num_devices, group_size))
num_groups = (
num_devices // group_size + (1 if (num_devices % group_size != 0) else 0))
groups = [[] for i in range(num_groups)]
for i in range(num_groups * group_size):
groups[i % num_groups].append(devices[i % num_devices])
return groups
def split_grads_by_size(threshold_size, device_grads):
"""Break gradients into two sets according to tensor size.
Args:
threshold_size: int size cutoff for small vs large tensor.
device_grads: List of lists of (gradient, variable) tuples. The outer
list is over devices. The inner list is over individual gradients.
Returns:
small_grads: Subset of device_grads where shape is <= threshold_size
elements.
large_grads: Subset of device_grads where shape is > threshold_size
elements.
"""
small_grads = []
large_grads = []
for dl in device_grads:
small_dl = []
large_dl = []
for (g, v) in dl:
tensor_size = g.get_shape().num_elements()
if tensor_size <= threshold_size:
small_dl.append([g, v])
else:
large_dl.append([g, v])
if small_dl:
small_grads.append(small_dl)
if large_dl:
large_grads.append(large_dl)
return small_grads, large_grads
# threading.Lock() and threading.local() cannot be pickled and therefore cannot
# be a field of CollectiveKeys. Right now _thread_local does not need to be
# an instance member of CollectiveKeys since we always create a new thread for
# each replica.
_lock = threading.Lock()
_thread_local = threading.local()
# TODO(yuefengz): use random key starts to avoid reusing keys?
class CollectiveKeys(object):
"""Class that manages collective keys.
  We need to manage three different keys for collective ops:
  *Group key*: an integer key to identify the set of cooperative devices.
  Collective ops that work under the same set of devices must use the same
  group key.
  *Instance key*: an integer key to identify the set of corresponding tensors
  on different devices in a device group that need to be all-reduced.
  *Graph key*: an integer key that is unique per graph. This is used to support
  multiple graphs per client session. It must be non-zero and set in the
  `config` argument of each call to `session.run`.
"""
# MASKED: __init__ function (lines 252-272)
def _get_thread_local_object(self):
# We make instance key without key ids thread local so that it will work
# with MirroredStrategy and distribute coordinator.
if not hasattr(_thread_local, 'instance_key'):
_thread_local.instance_key = self._instance_key_start
return _thread_local
def get_group_key(self, devices):
"""Returns a group key for the set of devices.
Args:
devices: list of strings naming devices in a collective group.
Returns:
int key uniquely identifying the set of device names.
"""
parsed = [pydev.DeviceSpec.from_string(d) for d in devices]
# In the between-graph replicated training, different workers need to get
# the same device key. So we remove the task_type and task_id from the
# devices.
# TODO(yuefengz): in the in-graph replicated training, we need to include
# task_type and task_id.
names = sorted(['%s:%d' % (d.device_type, d.device_index) for d in parsed])
key_id = ','.join(names)
with _lock:
if key_id not in self._group_key_table:
new_key = self._group_key
self._group_key += 1
self._group_key_table[key_id] = new_key
return self._group_key_table[key_id]
def get_instance_key(self, key_id=None):
"""Returns a new instance key for use in defining a collective op.
Args:
key_id: optional string. If set, key will be recorded and the same key
will be returned when the same key_id is provided. If not, an increasing
instance key will be returned.
"""
if key_id:
with _lock:
if key_id not in self._instance_key_id_to_key_table:
self._instance_key_with_id_counter += 1
self._instance_key_id_to_key_table[key_id] = (
self._instance_key_with_id_counter)
return self._instance_key_id_to_key_table[key_id]
else:
v = self._get_thread_local_object().instance_key
self._get_thread_local_object().instance_key += 1
return v
def build_collective_reduce(input_tensors,
num_workers,
collective_keys,
reduction_op='Add',
unary_op='Id'):
"""Build a subgraph that does one full all-reduce, using the collective Op.
Args:
input_tensors: tensors within a single worker graph that are to be reduced
together; must be one per device.
num_workers: total number of workers with identical independent graphs that
will be doing this same reduction. The reduction will actually include
the corresponding tensors at all these workers.
collective_keys: a CollectiveKeys object.
reduction_op: string naming the reduction op.
unary_op: string naming the unary final op.
Returns:
An array of final tensors, one per device, computed by the full reduction.
Raises:
ValueError: There must be at least two tensors over all the workers.
"""
group_size = len(input_tensors) * num_workers
if group_size < 2:
raise ValueError('num_workers * len(input_tensors) must be 2 or greater')
devices = [t.device for t in input_tensors]
num_devices = len(devices)
group_key = collective_keys.get_group_key(devices)
instance_key = collective_keys.get_instance_key()
out_tensors = []
subdiv_offsets = [0] # TODO(tucker): maybe support non-default subdiv spec
for d in range(num_devices):
with ops.device(devices[d]):
reduce_op = collective_ops.all_reduce(
input_tensors[d], group_size, group_key, instance_key, reduction_op,
unary_op, subdiv_offsets)
out_tensors.append(reduce_op)
return out_tensors
def sum_grad_and_var_all_reduce(grad_and_vars,
num_workers,
alg,
gpu_indices,
aux_devices=None,
num_shards=1):
"""Apply all-reduce algorithm over specified gradient tensors."""
with ops.name_scope('allreduce'):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
scaled_grads = [g for g, _ in grad_and_vars]
if alg == 'nccl':
summed_grads = nccl_ops.all_sum(scaled_grads)
elif alg == 'xring':
summed_grads = all_reduce.build_ring_all_reduce(
scaled_grads, num_workers, num_shards, gpu_indices, math_ops.add)
elif alg == 'nccl/xring':
summed_grads = all_reduce.build_nccl_then_ring(scaled_grads, num_shards,
math_ops.add)
elif alg == 'nccl/rechd':
summed_grads = all_reduce.build_nccl_then_recursive_hd(
scaled_grads, math_ops.add)
elif alg == 'nccl/pscpu':
summed_grads = all_reduce.build_nccl_then_shuffle(
scaled_grads, aux_devices, math_ops.add, math_ops.add_n)
elif alg == 'pscpu/pscpu':
second_gather_devices = aux_devices[:num_shards]
summed_grads = all_reduce.build_shuffle_then_shuffle(
scaled_grads, aux_devices, second_gather_devices, math_ops.add_n)
elif alg in ['pscpu', 'psgpu']:
summed_grads = all_reduce.build_shuffle_all_reduce(
scaled_grads, aux_devices, math_ops.add_n)
else:
      raise ValueError('unsupported all_reduce alg: %s' % alg)
result = []
for (_, v), g in zip(grad_and_vars, summed_grads):
result.append([g, v])
return result
def sum_gradients_all_reduce(dev_prefixes, replica_grads, num_workers, alg,
num_shards, gpu_indices):
"""Apply all-reduce algorithm over specified gradient tensors.
Args:
dev_prefixes: list of prefix strings to use to generate PS device names.
replica_grads: the gradients to reduce.
num_workers: number of worker processes across entire job.
alg: the all-reduce algorithm to apply.
num_shards: alg-specific sharding factor.
gpu_indices: indices of local GPUs in order usable for ring-reduce.
Returns:
list of reduced tensors
"""
alg_contains_shuffle = any([n in alg for n in ['pscpu', 'psgpu']])
is_hierarchical = '/' in alg
if 'pscpu' in alg:
aux_devices = [prefix + '/cpu:0' for prefix in dev_prefixes]
elif 'psgpu' in alg:
aux_devices = [
prefix + '/gpu:%d' % i
for i in range(len(gpu_indices))
for prefix in dev_prefixes
]
else:
aux_devices = ['/job:localhost/cpu:0']
# Auxiliary devices for hierarchical all-reduces.
aux_device_groups = group_device_names(
aux_devices, num_shards if alg_contains_shuffle else 1)
group_index = 0
reduced_gv_list = []
for grad_and_vars in zip(*replica_grads):
reduced_gv_list.append(
sum_grad_and_var_all_reduce(
grad_and_vars, num_workers, alg, gpu_indices, aux_devices
if is_hierarchical else aux_device_groups[group_index], num_shards))
group_index = (group_index + 1) % len(aux_device_groups)
new_replica_grads = [list(x) for x in zip(*reduced_gv_list)]
return new_replica_grads
def extract_ranges(index_list, range_size_limit=32):
"""Extract consecutive ranges and singles from index_list.
Args:
index_list: List of monotone increasing non-negative integers.
range_size_limit: Largest size range to return. If a larger
consecutive range exists, it will be returned as multiple
ranges.
Returns:
(ranges, singles) where ranges is a list of [first, last] pairs of
consecutive elements in index_list, and singles is all of the
other elements, in original order.
"""
if not index_list:
return [], []
first = index_list[0]
last = first
ranges = []
singles = []
for i in index_list[1:]:
if i == last + 1 and (last - first) <= range_size_limit:
last = i
else:
if last > first:
ranges.append([first, last])
else:
singles.append(first)
first = i
last = i
if last > first:
ranges.append([first, last])
else:
singles.append(first)
return ranges, singles
GradPackTuple = pycoll.namedtuple('GradPackTuple', 'indices vars shapes')
def pack_range(key, packing, grad_vars, rng):
"""Form the concatenation of a specified range of gradient tensors.
Args:
key: Value under which to store meta-data in packing that will be used
later to restore the grad_var list structure.
packing: Dict holding data describing packed ranges of small tensors.
grad_vars: List of (grad, var) pairs for one replica.
rng: A pair of integers giving the first, last indices of a consecutive
range of tensors to be packed.
Returns:
A tensor that is the concatenation of all the specified small tensors.
"""
to_pack = grad_vars[rng[0]:rng[1] + 1]
members = []
variables = []
restore_shapes = []
with ops.name_scope('pack'):
for g, v in to_pack:
variables.append(v)
restore_shapes.append(g.shape)
with ops.device(g.device):
members.append(array_ops.reshape(g, [-1]))
packing[key] = GradPackTuple(
indices=range(rng[0], rng[1] + 1),
vars=variables,
shapes=restore_shapes)
with ops.device(members[0].device):
return array_ops.concat(members, 0)
def unpack_grad_tuple(gv, gpt):
"""Unpack a previously packed collection of gradient tensors.
Args:
gv: A (grad, var) pair to be unpacked.
gpt: A GradPackTuple describing the packing operation that produced gv.
Returns:
A list of (grad, var) pairs corresponding to the values that were
originally packed into gv, maybe following subsequent operations like
reduction.
"""
elt_widths = [x.num_elements() for x in gpt.shapes]
with ops.device(gv[0][0].device):
with ops.name_scope('unpack'):
splits = array_ops.split(gv[0], elt_widths)
unpacked_gv = []
for idx, s in enumerate(splits):
unpacked_gv.append((array_ops.reshape(s, gpt.shapes[idx]),
gpt.vars[idx]))
return unpacked_gv
def pack_small_tensors(replica_grads, max_bytes=0, max_group=0):
"""Concatenate small gradient tensors together for reduction.
Args:
replica_grads: List of lists of (gradient, variable) tuples.
max_bytes: Int giving max number of bytes in a tensor that
may be considered small.
max_group: Int giving max number of small tensors that may be
concatenated into one new tensor.
Returns:
new_replica_grads, packing where new_replica_grads is identical to
replica_grads except that all feasible small_tensors have been removed
from their places and concatenated into larger tensors that are
now in the front of the list for each replica, and packing contains
the data necessary to restore the replica_grads structure.
Look through the first replica for gradients of the same type (float),
and small size, that are all sequential. For each such group,
replace by a new tensor that is a flattened concatenation. Note
that the corresponding variable will be absent, which doesn't matter
because it isn't used during all-reduce.
Requires:
Every gv_list in replicas must have isomorphic structure including identical
tensor sizes and types.
"""
small_indices = []
large_indices = []
for idx, (g, _) in enumerate(replica_grads[0]):
if g.dtype == dtypes.float32 and (4 * g.shape.num_elements()) <= max_bytes:
small_indices.append(idx)
else:
large_indices.append(idx)
small_ranges, small_singles = extract_ranges(
small_indices, range_size_limit=max_group)
large_indices = sorted(large_indices + small_singles)
num_gv = len(replica_grads[0])
packing = {}
if small_ranges:
new_replica_grads = []
for dev_idx, gv_list in enumerate(replica_grads):
assert len(gv_list) == num_gv
new_gv_list = []
for r in small_ranges:
key = '%d:%d' % (dev_idx, len(new_gv_list))
new_gv_list.append((pack_range(key, packing, gv_list, r),
'packing_var_placeholder'))
for i in large_indices:
new_gv_list.append(gv_list[i])
new_replica_grads.append(new_gv_list)
return new_replica_grads, packing
else:
return replica_grads, None
def unpack_small_tensors(replica_grads, packing):
"""Undo the structure alterations to replica_grads done by pack_small_tensors.
Args:
replica_grads: List of List of (grad, var) tuples.
packing: A dict generated by pack_small_tensors describing the changes
it made to replica_grads.
Returns:
new_replica_grads: identical to replica_grads except that concatenations
of small tensors have been split apart and returned to their original
positions, paired with their original variables.
"""
if not packing:
return replica_grads
new_replica_grads = []
num_devices = len(replica_grads)
num_packed = len(packing.keys()) // num_devices
for dev_idx, gv_list in enumerate(replica_grads):
gv_list = list(gv_list)
new_gv_list = gv_list[num_packed:]
for i in range(num_packed):
k = '%d:%d' % (dev_idx, i)
gpt = packing[k]
gv = unpack_grad_tuple(gv_list[i], gpt)
for gi, idx in enumerate(gpt.indices):
assert idx == gpt.indices[gi]
new_gv_list.insert(idx, gv[gi])
new_replica_grads.append(new_gv_list)
return new_replica_grads
def aggregate_tensors_or_indexed_slices(values, accumulation_fn=math_ops.add_n):
"""Aggregate tensors using `accumulation_fn` and IndexedSlices via concat."""
if any(isinstance(v, ops.IndexedSlices) for v in values):
return gradients_impl._AggregateIndexedSlicesGradients(values) # pylint: disable=protected-access
else:
return accumulation_fn(values)
def divide_by_n_tensors_or_indexed_slices(value, n):
if isinstance(value, ops.IndexedSlices):
value = gradients_impl._HandleNestedIndexedSlices(value) # pylint: disable=protected-access
return ops.IndexedSlices(
value.values / n, value.indices, value.dense_shape)
else:
return value / n
def copy_tensor_or_indexed_slices_to_device(value, device):
with ops.device(device):
if isinstance(value, ops.IndexedSlices):
copied_values = array_ops.identity(value.values)
copied_indices = array_ops.identity(value.indices)
copied_shape = array_ops.identity(value.dense_shape)
result = ops.IndexedSlices(copied_values, copied_indices, copied_shape)
else:
result = array_ops.identity(value)
return result
def contains_indexed_slices(value):
"""Check whether the value is `IndexedSlices` or contains `IndexedSlices`."""
if isinstance(value, ops.IndexedSlices):
return True
elif isinstance(value, (list, tuple)) and value:
return any(contains_indexed_slices(v) for v in value)
elif isinstance(value, value_lib.DistributedValues):
return contains_indexed_slices(list(value._index.values())) # pylint: disable=protected-access
else:
return False
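# A minimal sketch (editor's addition, assuming an eager or graph TF context):
# contains_indexed_slices recurses through lists and tuples, so a nested
# structure holding a single IndexedSlices member is reported as containing one.
def _example_contains_indexed_slices():
  dense = array_ops.ones([2, 3])
  sparse = ops.IndexedSlices(
      values=array_ops.ones([1, 3]),
      indices=array_ops.zeros([1], dtype=dtypes.int64))
  assert not contains_indexed_slices([dense, dense])
  assert contains_indexed_slices([dense, [sparse]])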
|
def __init__(self,
group_key_start=1,
instance_key_start=100,
instance_key_with_id_start=10000):
"""Initializes the object.
Args:
group_key_start: the starting integer of group key.
instance_key_start: the starting integer of instance key.
instance_key_with_id_start: the starting integer of instance key that is
recorded with an id.
"""
self._group_key = group_key_start
self._group_key_table = dict()
# For instance keys with ids
self._instance_key_id_to_key_table = dict()
self._instance_key_with_id_counter = instance_key_with_id_start
# For instance keys without ids
self._instance_key_start = instance_key_start
| 252 | 272 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for cross_device_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as pycoll
import threading
from tensorflow.python.distribute import all_reduce
from tensorflow.python.distribute import values as value_lib
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nccl_ops
def aggregate_gradients_using_nccl(replica_grads):
"""Aggregate gradients using nccl allreduce."""
agg_all_g_and_v = []
for single_g_and_v in zip(*replica_grads):
single_grads = [g for g, _ in single_g_and_v]
agg_grads = nccl_ops.all_sum(single_grads)
agg_all_g_and_v.append(
[(g, v) for g, (_, v) in zip(agg_grads, single_g_and_v)])
agg_all_g_and_v = list(zip(*agg_all_g_and_v))
return agg_all_g_and_v
def aggregate_gradients_using_hierarchical_copy(avail_devices, replica_grads):
"""Aggregate gradients using hierarchical copies.
Args:
avail_devices: available GPU devices.
replica_grads: List of lists of (gradient, variable) tuples. The outer list
is over replicas. The inner list is over individual gradients.
Returns:
The list of (aggregated_gradient, variable), where the gradient has been
summed across all replicas and the variable is chosen from the first
replica.
"""
# This only works for DGX-1 type of machine topology
# Device peer to peer matrix
# DMA: 0 1 2 3 4 5 6 7
# 0: Y Y Y Y Y N N N
# 1: Y Y Y Y N Y N N
# 2: Y Y Y Y N N Y N
# 3: Y Y Y Y N N N Y
# 4: Y N N N Y Y Y Y
# 5: N Y N N Y Y Y Y
# 6: N N Y N Y Y Y Y
# 7: N N N Y Y Y Y Y
agg_grads = []
num_devices = len(avail_devices)
# In the special case of DGX-1 machine topology, the two groups have equal
# size.
group_size = num_devices // 2
for i, single_grads in enumerate(zip(*replica_grads)):
group_0_main_device = i % num_devices
group_1_main_device = (group_0_main_device + group_size) % num_devices
if group_0_main_device < group_size:
group_0_begin = 0
group_1_begin = group_size
else:
group_0_begin = group_size
group_1_begin = 0
# Aggregate the first group.
group_0_device_grads = single_grads[group_0_begin:
group_0_begin + group_size]
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads, _ = aggregate_single_gradient_using_copy(
group_0_device_grads, False, False)
# Aggregate the second group.
group_1_device_grads = single_grads[group_1_begin:
group_1_begin + group_size]
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads, _ = aggregate_single_gradient_using_copy(
group_1_device_grads, False, False)
# Aggregate between the groups.
with ops.device(avail_devices[group_0_main_device]):
(agg_total_grads, _), _ = aggregate_single_gradient_using_copy(
[group_0_agg_grads, group_1_agg_grads], False, False)
# Broadcast the result back into the root of each group.
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads_bcast = array_ops.identity(agg_total_grads)
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads_bcast = array_ops.identity(agg_total_grads)
agg_grads_bcast = []
for j in range(len(single_grads)):
with ops.device(avail_devices[j]):
# Broadcast the result back to each member in the group from the root.
if (group_0_main_device < group_size) == (j < group_size):
src_device_grad = group_0_agg_grads_bcast
else:
src_device_grad = group_1_agg_grads_bcast
agg_grads_bcast.append(array_ops.identity(src_device_grad))
agg_grads.append(
[(g, v) for g, (_, v) in zip(agg_grads_bcast, single_grads)])
agg_grads = list(zip(*agg_grads))
return agg_grads
def aggregate_single_gradient_using_copy(grad_and_vars, use_mean,
check_inf_nan):
"""Calculate the average gradient for a shared variable across all replicas.
Note that this function provides a synchronization point across all replicas.
Args:
grad_and_vars: A list or tuple of (gradient, variable) tuples. Each
(gradient, variable) pair within the outer list represents the gradient
of the variable calculated for a single replica, and the number of pairs
equals the number of replicas.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all replicas. The variable is chosen
    from the first replica. has_nan_or_inf indicates whether any of the
    gradients contain NaN or Inf.
"""
grads = [g for g, _ in grad_and_vars]
grad = math_ops.add_n(grads)
if use_mean and len(grads) > 1:
grad = array_ops.multiply(grad, 1.0 / len(grads))
v = grad_and_vars[0][1]
if check_inf_nan:
has_nan_or_inf = array_ops.logical_not(
array_ops.reduce_all(array_ops.is_finite(grads)))
return (grad, v), has_nan_or_inf
else:
return (grad, v), None
def group_device_names(devices, group_size):
"""Group device names into groups of group_size.
Args:
devices: a list of canonical device strings.
group_size: integer which is equal to or greater than 1.
Returns:
list of lists of devices, where each inner list is group_size long,
and each device appears at least once in an inner list. If
len(devices) % group_size == 0 then each device will appear exactly once.
Raises:
ValueError: if group_size > len(devices)
"""
num_devices = len(devices)
if group_size > num_devices:
raise ValueError(
'only %d devices, but group_size=%d' % (num_devices, group_size))
num_groups = (
num_devices // group_size + (1 if (num_devices % group_size != 0) else 0))
groups = [[] for i in range(num_groups)]
for i in range(num_groups * group_size):
groups[i % num_groups].append(devices[i % num_devices])
return groups
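# Editor's usage sketch (hypothetical device strings, not from the original
# module): with 3 devices and group_size=2 we get 2 groups, and one device is
# reused so that every inner list has exactly group_size entries.
def _example_group_device_names():
  groups = group_device_names(['/gpu:0', '/gpu:1', '/gpu:2'], 2)
  assert groups == [['/gpu:0', '/gpu:2'], ['/gpu:1', '/gpu:0']]
  return groups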
def split_grads_by_size(threshold_size, device_grads):
"""Break gradients into two sets according to tensor size.
Args:
threshold_size: int size cutoff for small vs large tensor.
device_grads: List of lists of (gradient, variable) tuples. The outer
list is over devices. The inner list is over individual gradients.
Returns:
small_grads: Subset of device_grads where shape is <= threshold_size
elements.
large_grads: Subset of device_grads where shape is > threshold_size
elements.
"""
small_grads = []
large_grads = []
for dl in device_grads:
small_dl = []
large_dl = []
for (g, v) in dl:
tensor_size = g.get_shape().num_elements()
if tensor_size <= threshold_size:
small_dl.append([g, v])
else:
large_dl.append([g, v])
if small_dl:
small_grads.append(small_dl)
if large_dl:
large_grads.append(large_dl)
return small_grads, large_grads
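# Editor's sketch, assuming tensors with static shapes (the variable slot is
# not inspected by this helper, so a placeholder string stands in for it):
# gradients with at most threshold_size elements land in small_grads, the
# rest in large_grads.
def _example_split_grads_by_size():
  device_grads = [[(array_ops.ones([2]), 'small_var'),
                   (array_ops.ones([1024]), 'large_var')]]
  small, large = split_grads_by_size(4, device_grads)
  assert len(small[0]) == 1 and len(large[0]) == 1
  return small, large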
# threading.Lock() and threading.local() cannot be pickled and therefore cannot
# be a field of CollectiveKeys. Right now _thread_local does not need to be
# an instance member of CollectiveKeys since we always create a new thread for
# each replica.
_lock = threading.Lock()
_thread_local = threading.local()
# TODO(yuefengz): use random key starts to avoid reusing keys?
class CollectiveKeys(object):
"""Class that manages collective keys.
  We need to manage three different kinds of keys for collectives:
  *Group key*: an integer key to identify the set of cooperative devices.
  Collective ops that work under the same set of devices must use the same
  group key.
  *Instance key*: an integer key to identify the set of counterpart tensors on
  different devices in a device group that need to be all-reduced together.
  *Graph key*: an integer key that is unique per graph. This is used to support
multiple graphs per client session. It must be non-zero and set in the
`config` argument of each call to `session.run`.
"""
def __init__(self,
group_key_start=1,
instance_key_start=100,
instance_key_with_id_start=10000):
"""Initializes the object.
Args:
group_key_start: the starting integer of group key.
instance_key_start: the starting integer of instance key.
instance_key_with_id_start: the starting integer of instance key that is
recorded with an id.
"""
self._group_key = group_key_start
self._group_key_table = dict()
# For instance keys with ids
self._instance_key_id_to_key_table = dict()
self._instance_key_with_id_counter = instance_key_with_id_start
# For instance keys without ids
self._instance_key_start = instance_key_start
def _get_thread_local_object(self):
# We make instance key without key ids thread local so that it will work
# with MirroredStrategy and distribute coordinator.
if not hasattr(_thread_local, 'instance_key'):
_thread_local.instance_key = self._instance_key_start
return _thread_local
def get_group_key(self, devices):
"""Returns a group key for the set of devices.
Args:
devices: list of strings naming devices in a collective group.
Returns:
int key uniquely identifying the set of device names.
"""
parsed = [pydev.DeviceSpec.from_string(d) for d in devices]
# In the between-graph replicated training, different workers need to get
# the same device key. So we remove the task_type and task_id from the
# devices.
# TODO(yuefengz): in the in-graph replicated training, we need to include
# task_type and task_id.
names = sorted(['%s:%d' % (d.device_type, d.device_index) for d in parsed])
key_id = ','.join(names)
with _lock:
if key_id not in self._group_key_table:
new_key = self._group_key
self._group_key += 1
self._group_key_table[key_id] = new_key
return self._group_key_table[key_id]
def get_instance_key(self, key_id=None):
"""Returns a new instance key for use in defining a collective op.
Args:
key_id: optional string. If set, key will be recorded and the same key
will be returned when the same key_id is provided. If not, an increasing
instance key will be returned.
"""
if key_id:
with _lock:
if key_id not in self._instance_key_id_to_key_table:
self._instance_key_with_id_counter += 1
self._instance_key_id_to_key_table[key_id] = (
self._instance_key_with_id_counter)
return self._instance_key_id_to_key_table[key_id]
else:
v = self._get_thread_local_object().instance_key
self._get_thread_local_object().instance_key += 1
return v
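# Editor's sketch with made-up device names: because task_type and task_id are
# stripped before hashing, two workers listing their own local GPUs obtain the
# same group key, which is what between-graph replication requires.
def _example_collective_keys_group():
  keys = CollectiveKeys()
  k0 = keys.get_group_key(['/job:worker/task:0/device:GPU:0',
                           '/job:worker/task:0/device:GPU:1'])
  k1 = keys.get_group_key(['/job:worker/task:1/device:GPU:1',
                           '/job:worker/task:1/device:GPU:0'])
  assert k0 == k1
  return k0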
def build_collective_reduce(input_tensors,
num_workers,
collective_keys,
reduction_op='Add',
unary_op='Id'):
"""Build a subgraph that does one full all-reduce, using the collective Op.
Args:
input_tensors: tensors within a single worker graph that are to be reduced
together; must be one per device.
num_workers: total number of workers with identical independent graphs that
will be doing this same reduction. The reduction will actually include
the corresponding tensors at all these workers.
collective_keys: a CollectiveKeys object.
reduction_op: string naming the reduction op.
unary_op: string naming the unary final op.
Returns:
An array of final tensors, one per device, computed by the full reduction.
Raises:
ValueError: There must be at least two tensors over all the workers.
"""
group_size = len(input_tensors) * num_workers
if group_size < 2:
raise ValueError('num_workers * len(input_tensors) must be 2 or greater')
devices = [t.device for t in input_tensors]
num_devices = len(devices)
group_key = collective_keys.get_group_key(devices)
instance_key = collective_keys.get_instance_key()
out_tensors = []
subdiv_offsets = [0] # TODO(tucker): maybe support non-default subdiv spec
for d in range(num_devices):
with ops.device(devices[d]):
reduce_op = collective_ops.all_reduce(
input_tensors[d], group_size, group_key, instance_key, reduction_op,
unary_op, subdiv_offsets)
out_tensors.append(reduce_op)
return out_tensors
def sum_grad_and_var_all_reduce(grad_and_vars,
num_workers,
alg,
gpu_indices,
aux_devices=None,
num_shards=1):
"""Apply all-reduce algorithm over specified gradient tensors."""
with ops.name_scope('allreduce'):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
scaled_grads = [g for g, _ in grad_and_vars]
if alg == 'nccl':
summed_grads = nccl_ops.all_sum(scaled_grads)
elif alg == 'xring':
summed_grads = all_reduce.build_ring_all_reduce(
scaled_grads, num_workers, num_shards, gpu_indices, math_ops.add)
elif alg == 'nccl/xring':
summed_grads = all_reduce.build_nccl_then_ring(scaled_grads, num_shards,
math_ops.add)
elif alg == 'nccl/rechd':
summed_grads = all_reduce.build_nccl_then_recursive_hd(
scaled_grads, math_ops.add)
elif alg == 'nccl/pscpu':
summed_grads = all_reduce.build_nccl_then_shuffle(
scaled_grads, aux_devices, math_ops.add, math_ops.add_n)
elif alg == 'pscpu/pscpu':
second_gather_devices = aux_devices[:num_shards]
summed_grads = all_reduce.build_shuffle_then_shuffle(
scaled_grads, aux_devices, second_gather_devices, math_ops.add_n)
elif alg in ['pscpu', 'psgpu']:
summed_grads = all_reduce.build_shuffle_all_reduce(
scaled_grads, aux_devices, math_ops.add_n)
else:
raise ValueError('unsupported all_reduce alg: ', alg)
result = []
for (_, v), g in zip(grad_and_vars, summed_grads):
result.append([g, v])
return result
def sum_gradients_all_reduce(dev_prefixes, replica_grads, num_workers, alg,
num_shards, gpu_indices):
"""Apply all-reduce algorithm over specified gradient tensors.
Args:
dev_prefixes: list of prefix strings to use to generate PS device names.
replica_grads: the gradients to reduce.
num_workers: number of worker processes across entire job.
alg: the all-reduce algorithm to apply.
num_shards: alg-specific sharding factor.
gpu_indices: indices of local GPUs in order usable for ring-reduce.
Returns:
list of reduced tensors
"""
alg_contains_shuffle = any([n in alg for n in ['pscpu', 'psgpu']])
is_hierarchical = '/' in alg
if 'pscpu' in alg:
aux_devices = [prefix + '/cpu:0' for prefix in dev_prefixes]
elif 'psgpu' in alg:
aux_devices = [
prefix + '/gpu:%d' % i
for i in range(len(gpu_indices))
for prefix in dev_prefixes
]
else:
aux_devices = ['/job:localhost/cpu:0']
# Auxiliary devices for hierarchical all-reduces.
aux_device_groups = group_device_names(
aux_devices, num_shards if alg_contains_shuffle else 1)
group_index = 0
reduced_gv_list = []
for grad_and_vars in zip(*replica_grads):
reduced_gv_list.append(
sum_grad_and_var_all_reduce(
grad_and_vars, num_workers, alg, gpu_indices, aux_devices
if is_hierarchical else aux_device_groups[group_index], num_shards))
group_index = (group_index + 1) % len(aux_device_groups)
new_replica_grads = [list(x) for x in zip(*reduced_gv_list)]
return new_replica_grads
def extract_ranges(index_list, range_size_limit=32):
"""Extract consecutive ranges and singles from index_list.
Args:
index_list: List of monotone increasing non-negative integers.
range_size_limit: Largest size range to return. If a larger
consecutive range exists, it will be returned as multiple
ranges.
Returns:
(ranges, singles) where ranges is a list of [first, last] pairs of
consecutive elements in index_list, and singles is all of the
other elements, in original order.
"""
if not index_list:
return [], []
first = index_list[0]
last = first
ranges = []
singles = []
for i in index_list[1:]:
if i == last + 1 and (last - first) <= range_size_limit:
last = i
else:
if last > first:
ranges.append([first, last])
else:
singles.append(first)
first = i
last = i
if last > first:
ranges.append([first, last])
else:
singles.append(first)
return ranges, singles
GradPackTuple = pycoll.namedtuple('GradPackTuple', 'indices vars shapes')
def pack_range(key, packing, grad_vars, rng):
"""Form the concatenation of a specified range of gradient tensors.
Args:
key: Value under which to store meta-data in packing that will be used
later to restore the grad_var list structure.
packing: Dict holding data describing packed ranges of small tensors.
grad_vars: List of (grad, var) pairs for one replica.
rng: A pair of integers giving the first, last indices of a consecutive
range of tensors to be packed.
Returns:
A tensor that is the concatenation of all the specified small tensors.
"""
to_pack = grad_vars[rng[0]:rng[1] + 1]
members = []
variables = []
restore_shapes = []
with ops.name_scope('pack'):
for g, v in to_pack:
variables.append(v)
restore_shapes.append(g.shape)
with ops.device(g.device):
members.append(array_ops.reshape(g, [-1]))
packing[key] = GradPackTuple(
indices=range(rng[0], rng[1] + 1),
vars=variables,
shapes=restore_shapes)
with ops.device(members[0].device):
return array_ops.concat(members, 0)
def unpack_grad_tuple(gv, gpt):
"""Unpack a previously packed collection of gradient tensors.
Args:
gv: A (grad, var) pair to be unpacked.
gpt: A GradPackTuple describing the packing operation that produced gv.
Returns:
A list of (grad, var) pairs corresponding to the values that were
originally packed into gv, maybe following subsequent operations like
reduction.
"""
elt_widths = [x.num_elements() for x in gpt.shapes]
with ops.device(gv[0][0].device):
with ops.name_scope('unpack'):
splits = array_ops.split(gv[0], elt_widths)
unpacked_gv = []
for idx, s in enumerate(splits):
unpacked_gv.append((array_ops.reshape(s, gpt.shapes[idx]),
gpt.vars[idx]))
return unpacked_gv
def pack_small_tensors(replica_grads, max_bytes=0, max_group=0):
"""Concatenate small gradient tensors together for reduction.
Args:
replica_grads: List of lists of (gradient, variable) tuples.
max_bytes: Int giving max number of bytes in a tensor that
may be considered small.
max_group: Int giving max number of small tensors that may be
concatenated into one new tensor.
Returns:
new_replica_grads, packing where new_replica_grads is identical to
replica_grads except that all feasible small_tensors have been removed
from their places and concatenated into larger tensors that are
now in the front of the list for each replica, and packing contains
the data necessary to restore the replica_grads structure.
Look through the first replica for gradients of the same type (float),
and small size, that are all sequential. For each such group,
replace by a new tensor that is a flattened concatenation. Note
that the corresponding variable will be absent, which doesn't matter
because it isn't used during all-reduce.
Requires:
Every gv_list in replicas must have isomorphic structure including identical
tensor sizes and types.
"""
small_indices = []
large_indices = []
for idx, (g, _) in enumerate(replica_grads[0]):
if g.dtype == dtypes.float32 and (4 * g.shape.num_elements()) <= max_bytes:
small_indices.append(idx)
else:
large_indices.append(idx)
small_ranges, small_singles = extract_ranges(
small_indices, range_size_limit=max_group)
large_indices = sorted(large_indices + small_singles)
num_gv = len(replica_grads[0])
packing = {}
if small_ranges:
new_replica_grads = []
for dev_idx, gv_list in enumerate(replica_grads):
assert len(gv_list) == num_gv
new_gv_list = []
for r in small_ranges:
key = '%d:%d' % (dev_idx, len(new_gv_list))
new_gv_list.append((pack_range(key, packing, gv_list, r),
'packing_var_placeholder'))
for i in large_indices:
new_gv_list.append(gv_list[i])
new_replica_grads.append(new_gv_list)
return new_replica_grads, packing
else:
return replica_grads, None
def unpack_small_tensors(replica_grads, packing):
"""Undo the structure alterations to replica_grads done by pack_small_tensors.
Args:
replica_grads: List of List of (grad, var) tuples.
packing: A dict generated by pack_small_tensors describing the changes
it made to replica_grads.
Returns:
new_replica_grads: identical to replica_grads except that concatenations
of small tensors have been split apart and returned to their original
positions, paired with their original variables.
"""
if not packing:
return replica_grads
new_replica_grads = []
num_devices = len(replica_grads)
num_packed = len(packing.keys()) // num_devices
for dev_idx, gv_list in enumerate(replica_grads):
gv_list = list(gv_list)
new_gv_list = gv_list[num_packed:]
for i in range(num_packed):
k = '%d:%d' % (dev_idx, i)
gpt = packing[k]
gv = unpack_grad_tuple(gv_list[i], gpt)
for gi, idx in enumerate(gpt.indices):
assert idx == gpt.indices[gi]
new_gv_list.insert(idx, gv[gi])
new_replica_grads.append(new_gv_list)
return new_replica_grads
def aggregate_tensors_or_indexed_slices(values, accumulation_fn=math_ops.add_n):
"""Aggregate tensors using `accumulation_fn` and IndexedSlices via concat."""
if any(isinstance(v, ops.IndexedSlices) for v in values):
return gradients_impl._AggregateIndexedSlicesGradients(values) # pylint: disable=protected-access
else:
return accumulation_fn(values)
def divide_by_n_tensors_or_indexed_slices(value, n):
if isinstance(value, ops.IndexedSlices):
value = gradients_impl._HandleNestedIndexedSlices(value) # pylint: disable=protected-access
return ops.IndexedSlices(
value.values / n, value.indices, value.dense_shape)
else:
return value / n
def copy_tensor_or_indexed_slices_to_device(value, device):
with ops.device(device):
if isinstance(value, ops.IndexedSlices):
copied_values = array_ops.identity(value.values)
copied_indices = array_ops.identity(value.indices)
copied_shape = array_ops.identity(value.dense_shape)
result = ops.IndexedSlices(copied_values, copied_indices, copied_shape)
else:
result = array_ops.identity(value)
return result
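# Editor's sketch with a hypothetical device string (eager or graph context
# assumed): copying a dense tensor is a plain identity placed on the target
# device; IndexedSlices get their values, indices and dense_shape copied
# individually.
def _example_copy_to_device():
  value = array_ops.ones([2, 2])
  copied = copy_tensor_or_indexed_slices_to_device(value, '/device:CPU:0')
  return copied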
def contains_indexed_slices(value):
"""Check whether the value is `IndexedSlices` or contains `IndexedSlices`."""
if isinstance(value, ops.IndexedSlices):
return True
elif isinstance(value, (list, tuple)) and value:
return any(contains_indexed_slices(v) for v in value)
elif isinstance(value, value_lib.DistributedValues):
return contains_indexed_slices(list(value._index.values())) # pylint: disable=protected-access
else:
return False
|
get_group_key
|
Returns a group key for the set of devices.
Args:
devices: list of strings naming devices in a collective group.
Returns:
int key uniquely identifying the set of device names.
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for cross_device_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as pycoll
import threading
from tensorflow.python.distribute import all_reduce
from tensorflow.python.distribute import values as value_lib
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nccl_ops
def aggregate_gradients_using_nccl(replica_grads):
"""Aggregate gradients using nccl allreduce."""
agg_all_g_and_v = []
for single_g_and_v in zip(*replica_grads):
single_grads = [g for g, _ in single_g_and_v]
agg_grads = nccl_ops.all_sum(single_grads)
agg_all_g_and_v.append(
[(g, v) for g, (_, v) in zip(agg_grads, single_g_and_v)])
agg_all_g_and_v = list(zip(*agg_all_g_and_v))
return agg_all_g_and_v
def aggregate_gradients_using_hierarchical_copy(avail_devices, replica_grads):
"""Aggregate gradients using hierarchical copies.
Args:
avail_devices: available GPU devices.
replica_grads: List of lists of (gradient, variable) tuples. The outer list
is over replicas. The inner list is over individual gradients.
Returns:
The list of (aggregated_gradient, variable), where the gradient has been
summed across all replicas and the variable is chosen from the first
replica.
"""
# This only works for DGX-1 type of machine topology
# Device peer to peer matrix
# DMA: 0 1 2 3 4 5 6 7
# 0: Y Y Y Y Y N N N
# 1: Y Y Y Y N Y N N
# 2: Y Y Y Y N N Y N
# 3: Y Y Y Y N N N Y
# 4: Y N N N Y Y Y Y
# 5: N Y N N Y Y Y Y
# 6: N N Y N Y Y Y Y
# 7: N N N Y Y Y Y Y
agg_grads = []
num_devices = len(avail_devices)
# In the special case of DGX-1 machine topology, the two groups have equal
# size.
group_size = num_devices // 2
for i, single_grads in enumerate(zip(*replica_grads)):
group_0_main_device = i % num_devices
group_1_main_device = (group_0_main_device + group_size) % num_devices
if group_0_main_device < group_size:
group_0_begin = 0
group_1_begin = group_size
else:
group_0_begin = group_size
group_1_begin = 0
# Aggregate the first group.
group_0_device_grads = single_grads[group_0_begin:
group_0_begin + group_size]
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads, _ = aggregate_single_gradient_using_copy(
group_0_device_grads, False, False)
# Aggregate the second group.
group_1_device_grads = single_grads[group_1_begin:
group_1_begin + group_size]
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads, _ = aggregate_single_gradient_using_copy(
group_1_device_grads, False, False)
# Aggregate between the groups.
with ops.device(avail_devices[group_0_main_device]):
(agg_total_grads, _), _ = aggregate_single_gradient_using_copy(
[group_0_agg_grads, group_1_agg_grads], False, False)
# Broadcast the result back into the root of each group.
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads_bcast = array_ops.identity(agg_total_grads)
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads_bcast = array_ops.identity(agg_total_grads)
agg_grads_bcast = []
for j in range(len(single_grads)):
with ops.device(avail_devices[j]):
# Broadcast the result back to each member in the group from the root.
if (group_0_main_device < group_size) == (j < group_size):
src_device_grad = group_0_agg_grads_bcast
else:
src_device_grad = group_1_agg_grads_bcast
agg_grads_bcast.append(array_ops.identity(src_device_grad))
agg_grads.append(
[(g, v) for g, (_, v) in zip(agg_grads_bcast, single_grads)])
agg_grads = list(zip(*agg_grads))
return agg_grads
def aggregate_single_gradient_using_copy(grad_and_vars, use_mean,
check_inf_nan):
"""Calculate the average gradient for a shared variable across all replicas.
Note that this function provides a synchronization point across all replicas.
Args:
grad_and_vars: A list or tuple of (gradient, variable) tuples. Each
(gradient, variable) pair within the outer list represents the gradient
of the variable calculated for a single replica, and the number of pairs
equals the number of replicas.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all replicas. The variable is chosen
    from the first replica. has_nan_or_inf indicates whether any of the
    gradients contain NaN or Inf.
"""
grads = [g for g, _ in grad_and_vars]
grad = math_ops.add_n(grads)
if use_mean and len(grads) > 1:
grad = array_ops.multiply(grad, 1.0 / len(grads))
v = grad_and_vars[0][1]
if check_inf_nan:
has_nan_or_inf = array_ops.logical_not(
array_ops.reduce_all(array_ops.is_finite(grads)))
return (grad, v), has_nan_or_inf
else:
return (grad, v), None
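# Editor's sketch (assumes an eager or graph TF context; the variable object
# is only passed through, so a string stands in for it): with use_mean=False
# the per-replica gradients are simply summed and paired with the first
# replica's variable.
def _example_aggregate_single_gradient():
  grad_and_vars = [(array_ops.ones([3]), 'var'), (array_ops.ones([3]), 'var')]
  (summed_grad, var), _ = aggregate_single_gradient_using_copy(
      grad_and_vars, use_mean=False, check_inf_nan=False)
  # summed_grad is elementwise 1 + 1 == 2.
  return summed_grad, var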
def group_device_names(devices, group_size):
"""Group device names into groups of group_size.
Args:
devices: a list of canonical device strings.
group_size: integer which is equal to or greater than 1.
Returns:
list of lists of devices, where each inner list is group_size long,
and each device appears at least once in an inner list. If
len(devices) % group_size == 0 then each device will appear exactly once.
Raises:
ValueError: if group_size > len(devices)
"""
num_devices = len(devices)
if group_size > num_devices:
raise ValueError(
'only %d devices, but group_size=%d' % (num_devices, group_size))
num_groups = (
num_devices // group_size + (1 if (num_devices % group_size != 0) else 0))
groups = [[] for i in range(num_groups)]
for i in range(num_groups * group_size):
groups[i % num_groups].append(devices[i % num_devices])
return groups
def split_grads_by_size(threshold_size, device_grads):
"""Break gradients into two sets according to tensor size.
Args:
threshold_size: int size cutoff for small vs large tensor.
device_grads: List of lists of (gradient, variable) tuples. The outer
list is over devices. The inner list is over individual gradients.
Returns:
small_grads: Subset of device_grads where shape is <= threshold_size
elements.
large_grads: Subset of device_grads where shape is > threshold_size
elements.
"""
small_grads = []
large_grads = []
for dl in device_grads:
small_dl = []
large_dl = []
for (g, v) in dl:
tensor_size = g.get_shape().num_elements()
if tensor_size <= threshold_size:
small_dl.append([g, v])
else:
large_dl.append([g, v])
if small_dl:
small_grads.append(small_dl)
if large_dl:
large_grads.append(large_dl)
return small_grads, large_grads
# threading.Lock() and threading.local() cannot be pickled and therefore cannot
# be a field of CollectiveKeys. Right now _thread_local does not need to be
# an instance member of CollectiveKeys since we always create a new thread for
# each replica.
_lock = threading.Lock()
_thread_local = threading.local()
# TODO(yuefengz): use random key starts to avoid reusing keys?
class CollectiveKeys(object):
"""Class that manages collective keys.
  We need to manage three different kinds of keys for collectives:
  *Group key*: an integer key to identify the set of cooperative devices.
  Collective ops that work under the same set of devices must use the same
  group key.
  *Instance key*: an integer key to identify the set of counterpart tensors on
  different devices in a device group that need to be all-reduced together.
  *Graph key*: an integer key that is unique per graph. This is used to support
multiple graphs per client session. It must be non-zero and set in the
`config` argument of each call to `session.run`.
"""
def __init__(self,
group_key_start=1,
instance_key_start=100,
instance_key_with_id_start=10000):
"""Initializes the object.
Args:
group_key_start: the starting integer of group key.
instance_key_start: the starting integer of instance key.
instance_key_with_id_start: the starting integer of instance key that is
recorded with an id.
"""
self._group_key = group_key_start
self._group_key_table = dict()
# For instance keys with ids
self._instance_key_id_to_key_table = dict()
self._instance_key_with_id_counter = instance_key_with_id_start
# For instance keys without ids
self._instance_key_start = instance_key_start
def _get_thread_local_object(self):
# We make instance key without key ids thread local so that it will work
# with MirroredStrategy and distribute coordinator.
if not hasattr(_thread_local, 'instance_key'):
_thread_local.instance_key = self._instance_key_start
return _thread_local
# MASKED: get_group_key function (lines 281-303)
def get_instance_key(self, key_id=None):
"""Returns a new instance key for use in defining a collective op.
Args:
key_id: optional string. If set, key will be recorded and the same key
will be returned when the same key_id is provided. If not, an increasing
instance key will be returned.
"""
if key_id:
with _lock:
if key_id not in self._instance_key_id_to_key_table:
self._instance_key_with_id_counter += 1
self._instance_key_id_to_key_table[key_id] = (
self._instance_key_with_id_counter)
return self._instance_key_id_to_key_table[key_id]
else:
v = self._get_thread_local_object().instance_key
self._get_thread_local_object().instance_key += 1
return v
def build_collective_reduce(input_tensors,
num_workers,
collective_keys,
reduction_op='Add',
unary_op='Id'):
"""Build a subgraph that does one full all-reduce, using the collective Op.
Args:
input_tensors: tensors within a single worker graph that are to be reduced
together; must be one per device.
num_workers: total number of workers with identical independent graphs that
will be doing this same reduction. The reduction will actually include
the corresponding tensors at all these workers.
collective_keys: a CollectiveKeys object.
reduction_op: string naming the reduction op.
unary_op: string naming the unary final op.
Returns:
An array of final tensors, one per device, computed by the full reduction.
Raises:
ValueError: There must be at least two tensors over all the workers.
"""
group_size = len(input_tensors) * num_workers
if group_size < 2:
raise ValueError('num_workers * len(input_tensors) must be 2 or greater')
devices = [t.device for t in input_tensors]
num_devices = len(devices)
group_key = collective_keys.get_group_key(devices)
instance_key = collective_keys.get_instance_key()
out_tensors = []
subdiv_offsets = [0] # TODO(tucker): maybe support non-default subdiv spec
for d in range(num_devices):
with ops.device(devices[d]):
reduce_op = collective_ops.all_reduce(
input_tensors[d], group_size, group_key, instance_key, reduction_op,
unary_op, subdiv_offsets)
out_tensors.append(reduce_op)
return out_tensors
def sum_grad_and_var_all_reduce(grad_and_vars,
num_workers,
alg,
gpu_indices,
aux_devices=None,
num_shards=1):
"""Apply all-reduce algorithm over specified gradient tensors."""
with ops.name_scope('allreduce'):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
scaled_grads = [g for g, _ in grad_and_vars]
if alg == 'nccl':
summed_grads = nccl_ops.all_sum(scaled_grads)
elif alg == 'xring':
summed_grads = all_reduce.build_ring_all_reduce(
scaled_grads, num_workers, num_shards, gpu_indices, math_ops.add)
elif alg == 'nccl/xring':
summed_grads = all_reduce.build_nccl_then_ring(scaled_grads, num_shards,
math_ops.add)
elif alg == 'nccl/rechd':
summed_grads = all_reduce.build_nccl_then_recursive_hd(
scaled_grads, math_ops.add)
elif alg == 'nccl/pscpu':
summed_grads = all_reduce.build_nccl_then_shuffle(
scaled_grads, aux_devices, math_ops.add, math_ops.add_n)
elif alg == 'pscpu/pscpu':
second_gather_devices = aux_devices[:num_shards]
summed_grads = all_reduce.build_shuffle_then_shuffle(
scaled_grads, aux_devices, second_gather_devices, math_ops.add_n)
elif alg in ['pscpu', 'psgpu']:
summed_grads = all_reduce.build_shuffle_all_reduce(
scaled_grads, aux_devices, math_ops.add_n)
else:
raise ValueError('unsupported all_reduce alg: ', alg)
result = []
for (_, v), g in zip(grad_and_vars, summed_grads):
result.append([g, v])
return result
def sum_gradients_all_reduce(dev_prefixes, replica_grads, num_workers, alg,
num_shards, gpu_indices):
"""Apply all-reduce algorithm over specified gradient tensors.
Args:
dev_prefixes: list of prefix strings to use to generate PS device names.
replica_grads: the gradients to reduce.
num_workers: number of worker processes across entire job.
alg: the all-reduce algorithm to apply.
num_shards: alg-specific sharding factor.
gpu_indices: indices of local GPUs in order usable for ring-reduce.
Returns:
list of reduced tensors
"""
alg_contains_shuffle = any([n in alg for n in ['pscpu', 'psgpu']])
is_hierarchical = '/' in alg
if 'pscpu' in alg:
aux_devices = [prefix + '/cpu:0' for prefix in dev_prefixes]
elif 'psgpu' in alg:
aux_devices = [
prefix + '/gpu:%d' % i
for i in range(len(gpu_indices))
for prefix in dev_prefixes
]
else:
aux_devices = ['/job:localhost/cpu:0']
# Auxiliary devices for hierarchical all-reduces.
aux_device_groups = group_device_names(
aux_devices, num_shards if alg_contains_shuffle else 1)
group_index = 0
reduced_gv_list = []
for grad_and_vars in zip(*replica_grads):
reduced_gv_list.append(
sum_grad_and_var_all_reduce(
grad_and_vars, num_workers, alg, gpu_indices, aux_devices
if is_hierarchical else aux_device_groups[group_index], num_shards))
group_index = (group_index + 1) % len(aux_device_groups)
new_replica_grads = [list(x) for x in zip(*reduced_gv_list)]
return new_replica_grads
def extract_ranges(index_list, range_size_limit=32):
"""Extract consecutive ranges and singles from index_list.
Args:
index_list: List of monotone increasing non-negative integers.
range_size_limit: Largest size range to return. If a larger
consecutive range exists, it will be returned as multiple
ranges.
Returns:
(ranges, singles) where ranges is a list of [first, last] pairs of
consecutive elements in index_list, and singles is all of the
other elements, in original order.
"""
if not index_list:
return [], []
first = index_list[0]
last = first
ranges = []
singles = []
for i in index_list[1:]:
if i == last + 1 and (last - first) <= range_size_limit:
last = i
else:
if last > first:
ranges.append([first, last])
else:
singles.append(first)
first = i
last = i
if last > first:
ranges.append([first, last])
else:
singles.append(first)
return ranges, singles
GradPackTuple = pycoll.namedtuple('GradPackTuple', 'indices vars shapes')
def pack_range(key, packing, grad_vars, rng):
"""Form the concatenation of a specified range of gradient tensors.
Args:
key: Value under which to store meta-data in packing that will be used
later to restore the grad_var list structure.
packing: Dict holding data describing packed ranges of small tensors.
grad_vars: List of (grad, var) pairs for one replica.
rng: A pair of integers giving the first, last indices of a consecutive
range of tensors to be packed.
Returns:
A tensor that is the concatenation of all the specified small tensors.
"""
to_pack = grad_vars[rng[0]:rng[1] + 1]
members = []
variables = []
restore_shapes = []
with ops.name_scope('pack'):
for g, v in to_pack:
variables.append(v)
restore_shapes.append(g.shape)
with ops.device(g.device):
members.append(array_ops.reshape(g, [-1]))
packing[key] = GradPackTuple(
indices=range(rng[0], rng[1] + 1),
vars=variables,
shapes=restore_shapes)
with ops.device(members[0].device):
return array_ops.concat(members, 0)
def unpack_grad_tuple(gv, gpt):
"""Unpack a previously packed collection of gradient tensors.
Args:
gv: A (grad, var) pair to be unpacked.
gpt: A GradPackTuple describing the packing operation that produced gv.
Returns:
A list of (grad, var) pairs corresponding to the values that were
originally packed into gv, maybe following subsequent operations like
reduction.
"""
elt_widths = [x.num_elements() for x in gpt.shapes]
with ops.device(gv[0][0].device):
with ops.name_scope('unpack'):
splits = array_ops.split(gv[0], elt_widths)
unpacked_gv = []
for idx, s in enumerate(splits):
unpacked_gv.append((array_ops.reshape(s, gpt.shapes[idx]),
gpt.vars[idx]))
return unpacked_gv
def pack_small_tensors(replica_grads, max_bytes=0, max_group=0):
"""Concatenate small gradient tensors together for reduction.
Args:
replica_grads: List of lists of (gradient, variable) tuples.
max_bytes: Int giving max number of bytes in a tensor that
may be considered small.
max_group: Int giving max number of small tensors that may be
concatenated into one new tensor.
Returns:
new_replica_grads, packing where new_replica_grads is identical to
replica_grads except that all feasible small_tensors have been removed
from their places and concatenated into larger tensors that are
now in the front of the list for each replica, and packing contains
the data necessary to restore the replica_grads structure.
Look through the first replica for gradients of the same type (float),
and small size, that are all sequential. For each such group,
replace by a new tensor that is a flattened concatenation. Note
that the corresponding variable will be absent, which doesn't matter
because it isn't used during all-reduce.
Requires:
Every gv_list in replicas must have isomorphic structure including identical
tensor sizes and types.
"""
small_indices = []
large_indices = []
for idx, (g, _) in enumerate(replica_grads[0]):
if g.dtype == dtypes.float32 and (4 * g.shape.num_elements()) <= max_bytes:
small_indices.append(idx)
else:
large_indices.append(idx)
small_ranges, small_singles = extract_ranges(
small_indices, range_size_limit=max_group)
large_indices = sorted(large_indices + small_singles)
num_gv = len(replica_grads[0])
packing = {}
if small_ranges:
new_replica_grads = []
for dev_idx, gv_list in enumerate(replica_grads):
assert len(gv_list) == num_gv
new_gv_list = []
for r in small_ranges:
key = '%d:%d' % (dev_idx, len(new_gv_list))
new_gv_list.append((pack_range(key, packing, gv_list, r),
'packing_var_placeholder'))
for i in large_indices:
new_gv_list.append(gv_list[i])
new_replica_grads.append(new_gv_list)
return new_replica_grads, packing
else:
return replica_grads, None
def unpack_small_tensors(replica_grads, packing):
"""Undo the structure alterations to replica_grads done by pack_small_tensors.
Args:
replica_grads: List of List of (grad, var) tuples.
packing: A dict generated by pack_small_tensors describing the changes
it made to replica_grads.
Returns:
new_replica_grads: identical to replica_grads except that concatenations
of small tensors have been split apart and returned to their original
positions, paired with their original variables.
"""
if not packing:
return replica_grads
new_replica_grads = []
num_devices = len(replica_grads)
num_packed = len(packing.keys()) // num_devices
for dev_idx, gv_list in enumerate(replica_grads):
gv_list = list(gv_list)
new_gv_list = gv_list[num_packed:]
for i in range(num_packed):
k = '%d:%d' % (dev_idx, i)
gpt = packing[k]
gv = unpack_grad_tuple(gv_list[i], gpt)
for gi, idx in enumerate(gpt.indices):
assert idx == gpt.indices[gi]
new_gv_list.insert(idx, gv[gi])
new_replica_grads.append(new_gv_list)
return new_replica_grads
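# Editor's round-trip sketch (assumes an eager or graph TF context and uses
# made-up variable names): two small float32 gradients are packed into one
# flat tensor, then unpack_small_tensors restores the original structure.
def _example_pack_unpack_round_trip():
  replica_grads = [[(array_ops.ones([2]), 'v0'),
                    (array_ops.ones([3]), 'v1'),
                    (array_ops.ones([1024]), 'v2')]]
  packed, packing = pack_small_tensors(
      replica_grads, max_bytes=32, max_group=16)
  # packed[0][0] is a flat tensor of 5 elements; the large 'v2' gradient is
  # left unpacked.
  restored = unpack_small_tensors(packed, packing)
  assert [v for _, v in restored[0]] == ['v0', 'v1', 'v2']
  return restored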
def aggregate_tensors_or_indexed_slices(values, accumulation_fn=math_ops.add_n):
"""Aggregate tensors using `accumulation_fn` and IndexedSlices via concat."""
if any(isinstance(v, ops.IndexedSlices) for v in values):
return gradients_impl._AggregateIndexedSlicesGradients(values) # pylint: disable=protected-access
else:
return accumulation_fn(values)
def divide_by_n_tensors_or_indexed_slices(value, n):
if isinstance(value, ops.IndexedSlices):
value = gradients_impl._HandleNestedIndexedSlices(value) # pylint: disable=protected-access
return ops.IndexedSlices(
value.values / n, value.indices, value.dense_shape)
else:
return value / n
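# Editor's sketch (eager or graph context assumed): dividing a dense tensor
# just divides its values, while an IndexedSlices keeps its indices and
# dense_shape and only has its values divided.
def _example_divide_by_n():
  dense = divide_by_n_tensors_or_indexed_slices(array_ops.ones([4]), 2)
  sparse = divide_by_n_tensors_or_indexed_slices(
      ops.IndexedSlices(values=array_ops.ones([1, 4]),
                        indices=array_ops.zeros([1], dtype=dtypes.int64)), 2)
  return dense, sparse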
def copy_tensor_or_indexed_slices_to_device(value, device):
with ops.device(device):
if isinstance(value, ops.IndexedSlices):
copied_values = array_ops.identity(value.values)
copied_indices = array_ops.identity(value.indices)
copied_shape = array_ops.identity(value.dense_shape)
result = ops.IndexedSlices(copied_values, copied_indices, copied_shape)
else:
result = array_ops.identity(value)
return result
def contains_indexed_slices(value):
"""Check whether the value is `IndexedSlices` or contains `IndexedSlices`."""
if isinstance(value, ops.IndexedSlices):
return True
elif isinstance(value, (list, tuple)) and value:
return any(contains_indexed_slices(v) for v in value)
elif isinstance(value, value_lib.DistributedValues):
return contains_indexed_slices(list(value._index.values())) # pylint: disable=protected-access
else:
return False
|
def get_group_key(self, devices):
"""Returns a group key for the set of devices.
Args:
devices: list of strings naming devices in a collective group.
Returns:
int key uniquely identifying the set of device names.
"""
parsed = [pydev.DeviceSpec.from_string(d) for d in devices]
# In the between-graph replicated training, different workers need to get
# the same device key. So we remove the task_type and task_id from the
# devices.
# TODO(yuefengz): in the in-graph replicated training, we need to include
# task_type and task_id.
names = sorted(['%s:%d' % (d.device_type, d.device_index) for d in parsed])
key_id = ','.join(names)
with _lock:
if key_id not in self._group_key_table:
new_key = self._group_key
self._group_key += 1
self._group_key_table[key_id] = new_key
return self._group_key_table[key_id]
| 281 | 303 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for cross_device_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as pycoll
import threading
from tensorflow.python.distribute import all_reduce
from tensorflow.python.distribute import values as value_lib
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nccl_ops
def aggregate_gradients_using_nccl(replica_grads):
"""Aggregate gradients using nccl allreduce."""
agg_all_g_and_v = []
for single_g_and_v in zip(*replica_grads):
single_grads = [g for g, _ in single_g_and_v]
agg_grads = nccl_ops.all_sum(single_grads)
agg_all_g_and_v.append(
[(g, v) for g, (_, v) in zip(agg_grads, single_g_and_v)])
agg_all_g_and_v = list(zip(*agg_all_g_and_v))
return agg_all_g_and_v
def aggregate_gradients_using_hierarchical_copy(avail_devices, replica_grads):
"""Aggregate gradients using hierarchical copies.
Args:
avail_devices: available GPU devices.
replica_grads: List of lists of (gradient, variable) tuples. The outer list
is over replicas. The inner list is over individual gradients.
Returns:
The list of (aggregated_gradient, variable), where the gradient has been
summed across all replicas and the variable is chosen from the first
replica.
"""
# This only works for DGX-1 type of machine topology
# Device peer to peer matrix
# DMA: 0 1 2 3 4 5 6 7
# 0: Y Y Y Y Y N N N
# 1: Y Y Y Y N Y N N
# 2: Y Y Y Y N N Y N
# 3: Y Y Y Y N N N Y
# 4: Y N N N Y Y Y Y
# 5: N Y N N Y Y Y Y
# 6: N N Y N Y Y Y Y
# 7: N N N Y Y Y Y Y
agg_grads = []
num_devices = len(avail_devices)
# In the special case of DGX-1 machine topology, the two groups have equal
# size.
group_size = num_devices // 2
for i, single_grads in enumerate(zip(*replica_grads)):
group_0_main_device = i % num_devices
group_1_main_device = (group_0_main_device + group_size) % num_devices
if group_0_main_device < group_size:
group_0_begin = 0
group_1_begin = group_size
else:
group_0_begin = group_size
group_1_begin = 0
# Aggregate the first group.
group_0_device_grads = single_grads[group_0_begin:
group_0_begin + group_size]
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads, _ = aggregate_single_gradient_using_copy(
group_0_device_grads, False, False)
# Aggregate the second group.
group_1_device_grads = single_grads[group_1_begin:
group_1_begin + group_size]
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads, _ = aggregate_single_gradient_using_copy(
group_1_device_grads, False, False)
# Aggregate between the groups.
with ops.device(avail_devices[group_0_main_device]):
(agg_total_grads, _), _ = aggregate_single_gradient_using_copy(
[group_0_agg_grads, group_1_agg_grads], False, False)
# Broadcast the result back into the root of each group.
with ops.device(avail_devices[group_0_main_device]):
group_0_agg_grads_bcast = array_ops.identity(agg_total_grads)
with ops.device(avail_devices[group_1_main_device]):
group_1_agg_grads_bcast = array_ops.identity(agg_total_grads)
agg_grads_bcast = []
for j in range(len(single_grads)):
with ops.device(avail_devices[j]):
# Broadcast the result back to each member in the group from the root.
if (group_0_main_device < group_size) == (j < group_size):
src_device_grad = group_0_agg_grads_bcast
else:
src_device_grad = group_1_agg_grads_bcast
agg_grads_bcast.append(array_ops.identity(src_device_grad))
agg_grads.append(
[(g, v) for g, (_, v) in zip(agg_grads_bcast, single_grads)])
agg_grads = list(zip(*agg_grads))
return agg_grads
def aggregate_single_gradient_using_copy(grad_and_vars, use_mean,
check_inf_nan):
"""Calculate the average gradient for a shared variable across all replicas.
Note that this function provides a synchronization point across all replicas.
Args:
grad_and_vars: A list or tuple of (gradient, variable) tuples. Each
(gradient, variable) pair within the outer list represents the gradient
of the variable calculated for a single replica, and the number of pairs
equals the number of replicas.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all replicas. The variable is chosen
    from the first replica. has_nan_or_inf indicates whether any of the
    gradients contain NaN or Inf.
"""
grads = [g for g, _ in grad_and_vars]
grad = math_ops.add_n(grads)
if use_mean and len(grads) > 1:
grad = array_ops.multiply(grad, 1.0 / len(grads))
v = grad_and_vars[0][1]
if check_inf_nan:
has_nan_or_inf = array_ops.logical_not(
array_ops.reduce_all(array_ops.is_finite(grads)))
return (grad, v), has_nan_or_inf
else:
return (grad, v), None
def group_device_names(devices, group_size):
"""Group device names into groups of group_size.
Args:
devices: a list of canonical device strings.
group_size: integer which is equal to or greater than 1.
Returns:
list of lists of devices, where each inner list is group_size long,
and each device appears at least once in an inner list. If
len(devices) % group_size == 0 then each device will appear exactly once.
Raises:
ValueError: if group_size > len(devices)
"""
num_devices = len(devices)
if group_size > num_devices:
raise ValueError(
'only %d devices, but group_size=%d' % (num_devices, group_size))
num_groups = (
num_devices // group_size + (1 if (num_devices % group_size != 0) else 0))
groups = [[] for i in range(num_groups)]
for i in range(num_groups * group_size):
groups[i % num_groups].append(devices[i % num_devices])
return groups
def split_grads_by_size(threshold_size, device_grads):
"""Break gradients into two sets according to tensor size.
Args:
threshold_size: int size cutoff for small vs large tensor.
device_grads: List of lists of (gradient, variable) tuples. The outer
list is over devices. The inner list is over individual gradients.
Returns:
small_grads: Subset of device_grads where shape is <= threshold_size
elements.
large_grads: Subset of device_grads where shape is > threshold_size
elements.
"""
small_grads = []
large_grads = []
for dl in device_grads:
small_dl = []
large_dl = []
for (g, v) in dl:
tensor_size = g.get_shape().num_elements()
if tensor_size <= threshold_size:
small_dl.append([g, v])
else:
large_dl.append([g, v])
if small_dl:
small_grads.append(small_dl)
if large_dl:
large_grads.append(large_dl)
return small_grads, large_grads
# threading.Lock() and threading.local() cannot be pickled and therefore cannot
# be a field of CollectiveKeys. Right now _thread_local does not need to be
# an instance member of CollectiveKeys since we always create a new thread for
# each replica.
_lock = threading.Lock()
_thread_local = threading.local()
# TODO(yuefengz): use random key starts to avoid reusing keys?
class CollectiveKeys(object):
"""Class that manages collective keys.
  We need to manage three different kinds of keys for collectives:
  *Group key*: an integer key to identify the set of cooperative devices.
  Collective ops that work under the same set of devices must use the same
  group key.
  *Instance key*: an integer key to identify the set of counterpart tensors on
  different devices in a device group that need to be all-reduced together.
  *Graph key*: an integer key that is unique per graph. This is used to support
multiple graphs per client session. It must be non-zero and set in the
`config` argument of each call to `session.run`.
"""
def __init__(self,
group_key_start=1,
instance_key_start=100,
instance_key_with_id_start=10000):
"""Initializes the object.
Args:
group_key_start: the starting integer of group key.
instance_key_start: the starting integer of instance key.
instance_key_with_id_start: the starting integer of instance key that is
recorded with an id.
"""
self._group_key = group_key_start
self._group_key_table = dict()
# For instance keys with ids
self._instance_key_id_to_key_table = dict()
self._instance_key_with_id_counter = instance_key_with_id_start
# For instance keys without ids
self._instance_key_start = instance_key_start
def _get_thread_local_object(self):
# We make instance key without key ids thread local so that it will work
# with MirroredStrategy and distribute coordinator.
if not hasattr(_thread_local, 'instance_key'):
_thread_local.instance_key = self._instance_key_start
return _thread_local
def get_group_key(self, devices):
"""Returns a group key for the set of devices.
Args:
devices: list of strings naming devices in a collective group.
Returns:
int key uniquely identifying the set of device names.
"""
parsed = [pydev.DeviceSpec.from_string(d) for d in devices]
# In the between-graph replicated training, different workers need to get
# the same device key. So we remove the task_type and task_id from the
# devices.
# TODO(yuefengz): in the in-graph replicated training, we need to include
# task_type and task_id.
names = sorted(['%s:%d' % (d.device_type, d.device_index) for d in parsed])
key_id = ','.join(names)
with _lock:
if key_id not in self._group_key_table:
new_key = self._group_key
self._group_key += 1
self._group_key_table[key_id] = new_key
return self._group_key_table[key_id]
def get_instance_key(self, key_id=None):
"""Returns a new instance key for use in defining a collective op.
Args:
key_id: optional string. If set, key will be recorded and the same key
will be returned when the same key_id is provided. If not, an increasing
instance key will be returned.
"""
if key_id:
with _lock:
if key_id not in self._instance_key_id_to_key_table:
self._instance_key_with_id_counter += 1
self._instance_key_id_to_key_table[key_id] = (
self._instance_key_with_id_counter)
return self._instance_key_id_to_key_table[key_id]
else:
v = self._get_thread_local_object().instance_key
self._get_thread_local_object().instance_key += 1
return v
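# Illustrative sketch, not part of the original module: the group key depends
# only on the sorted device types and indices, so permuted device lists map to
# the same key, while instance keys simply increase per call. The device names
# are hypothetical.
def _example_collective_keys():
  keys = CollectiveKeys()
  k1 = keys.get_group_key(['/job:worker/replica:0/task:0/device:GPU:0',
                           '/job:worker/replica:0/task:0/device:GPU:1'])
  k2 = keys.get_group_key(['/job:worker/replica:0/task:0/device:GPU:1',
                           '/job:worker/replica:0/task:0/device:GPU:0'])
  assert k1 == k2
  assert keys.get_instance_key() != keys.get_instance_key()
  return k1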
def build_collective_reduce(input_tensors,
num_workers,
collective_keys,
reduction_op='Add',
unary_op='Id'):
"""Build a subgraph that does one full all-reduce, using the collective Op.
Args:
input_tensors: tensors within a single worker graph that are to be reduced
together; must be one per device.
num_workers: total number of workers with identical independent graphs that
will be doing this same reduction. The reduction will actually include
the corresponding tensors at all these workers.
collective_keys: a CollectiveKeys object.
reduction_op: string naming the reduction op.
unary_op: string naming the unary final op.
Returns:
An array of final tensors, one per device, computed by the full reduction.
Raises:
ValueError: There must be at least two tensors over all the workers.
"""
group_size = len(input_tensors) * num_workers
if group_size < 2:
raise ValueError('num_workers * len(input_tensors) must be 2 or greater')
devices = [t.device for t in input_tensors]
num_devices = len(devices)
group_key = collective_keys.get_group_key(devices)
instance_key = collective_keys.get_instance_key()
out_tensors = []
subdiv_offsets = [0] # TODO(tucker): maybe support non-default subdiv spec
for d in range(num_devices):
with ops.device(devices[d]):
reduce_op = collective_ops.all_reduce(
input_tensors[d], group_size, group_key, instance_key, reduction_op,
unary_op, subdiv_offsets)
out_tensors.append(reduce_op)
return out_tensors
def sum_grad_and_var_all_reduce(grad_and_vars,
num_workers,
alg,
gpu_indices,
aux_devices=None,
num_shards=1):
"""Apply all-reduce algorithm over specified gradient tensors."""
with ops.name_scope('allreduce'):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
scaled_grads = [g for g, _ in grad_and_vars]
if alg == 'nccl':
summed_grads = nccl_ops.all_sum(scaled_grads)
elif alg == 'xring':
summed_grads = all_reduce.build_ring_all_reduce(
scaled_grads, num_workers, num_shards, gpu_indices, math_ops.add)
elif alg == 'nccl/xring':
summed_grads = all_reduce.build_nccl_then_ring(scaled_grads, num_shards,
math_ops.add)
elif alg == 'nccl/rechd':
summed_grads = all_reduce.build_nccl_then_recursive_hd(
scaled_grads, math_ops.add)
elif alg == 'nccl/pscpu':
summed_grads = all_reduce.build_nccl_then_shuffle(
scaled_grads, aux_devices, math_ops.add, math_ops.add_n)
elif alg == 'pscpu/pscpu':
second_gather_devices = aux_devices[:num_shards]
summed_grads = all_reduce.build_shuffle_then_shuffle(
scaled_grads, aux_devices, second_gather_devices, math_ops.add_n)
elif alg in ['pscpu', 'psgpu']:
summed_grads = all_reduce.build_shuffle_all_reduce(
scaled_grads, aux_devices, math_ops.add_n)
else:
raise ValueError('unsupported all_reduce alg: ', alg)
result = []
for (_, v), g in zip(grad_and_vars, summed_grads):
result.append([g, v])
return result
def sum_gradients_all_reduce(dev_prefixes, replica_grads, num_workers, alg,
num_shards, gpu_indices):
"""Apply all-reduce algorithm over specified gradient tensors.
Args:
dev_prefixes: list of prefix strings to use to generate PS device names.
replica_grads: the gradients to reduce.
num_workers: number of worker processes across entire job.
alg: the all-reduce algorithm to apply.
num_shards: alg-specific sharding factor.
gpu_indices: indices of local GPUs in order usable for ring-reduce.
Returns:
list of reduced tensors
"""
alg_contains_shuffle = any([n in alg for n in ['pscpu', 'psgpu']])
is_hierarchical = '/' in alg
if 'pscpu' in alg:
aux_devices = [prefix + '/cpu:0' for prefix in dev_prefixes]
elif 'psgpu' in alg:
aux_devices = [
prefix + '/gpu:%d' % i
for i in range(len(gpu_indices))
for prefix in dev_prefixes
]
else:
aux_devices = ['/job:localhost/cpu:0']
# Auxiliary devices for hierarchical all-reduces.
aux_device_groups = group_device_names(
aux_devices, num_shards if alg_contains_shuffle else 1)
group_index = 0
reduced_gv_list = []
for grad_and_vars in zip(*replica_grads):
reduced_gv_list.append(
sum_grad_and_var_all_reduce(
grad_and_vars, num_workers, alg, gpu_indices, aux_devices
if is_hierarchical else aux_device_groups[group_index], num_shards))
group_index = (group_index + 1) % len(aux_device_groups)
new_replica_grads = [list(x) for x in zip(*reduced_gv_list)]
return new_replica_grads
def extract_ranges(index_list, range_size_limit=32):
"""Extract consecutive ranges and singles from index_list.
Args:
index_list: List of monotone increasing non-negative integers.
range_size_limit: Largest size range to return. If a larger
consecutive range exists, it will be returned as multiple
ranges.
Returns:
(ranges, singles) where ranges is a list of [first, last] pairs of
consecutive elements in index_list, and singles is all of the
other elements, in original order.
"""
if not index_list:
return [], []
first = index_list[0]
last = first
ranges = []
singles = []
for i in index_list[1:]:
if i == last + 1 and (last - first) <= range_size_limit:
last = i
else:
if last > first:
ranges.append([first, last])
else:
singles.append(first)
first = i
last = i
if last > first:
ranges.append([first, last])
else:
singles.append(first)
return ranges, singles
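# Illustrative sketch, not part of the original module: a worked example of how
# extract_ranges splits a monotone index list into consecutive ranges and
# singles.
def _example_extract_ranges():
  ranges, singles = extract_ranges([0, 1, 2, 5, 7, 8])
  # [0, 1, 2] and [7, 8] are consecutive runs; 5 has no neighbour.
  assert ranges == [[0, 2], [7, 8]]
  assert singles == [5]
  return ranges, singles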
GradPackTuple = pycoll.namedtuple('GradPackTuple', 'indices vars shapes')
def pack_range(key, packing, grad_vars, rng):
"""Form the concatenation of a specified range of gradient tensors.
Args:
key: Value under which to store meta-data in packing that will be used
later to restore the grad_var list structure.
packing: Dict holding data describing packed ranges of small tensors.
grad_vars: List of (grad, var) pairs for one replica.
rng: A pair of integers giving the first, last indices of a consecutive
range of tensors to be packed.
Returns:
A tensor that is the concatenation of all the specified small tensors.
"""
to_pack = grad_vars[rng[0]:rng[1] + 1]
members = []
variables = []
restore_shapes = []
with ops.name_scope('pack'):
for g, v in to_pack:
variables.append(v)
restore_shapes.append(g.shape)
with ops.device(g.device):
members.append(array_ops.reshape(g, [-1]))
packing[key] = GradPackTuple(
indices=range(rng[0], rng[1] + 1),
vars=variables,
shapes=restore_shapes)
with ops.device(members[0].device):
return array_ops.concat(members, 0)
def unpack_grad_tuple(gv, gpt):
"""Unpack a previously packed collection of gradient tensors.
Args:
gv: A (grad, var) pair to be unpacked.
gpt: A GradPackTuple describing the packing operation that produced gv.
Returns:
A list of (grad, var) pairs corresponding to the values that were
originally packed into gv, maybe following subsequent operations like
reduction.
"""
elt_widths = [x.num_elements() for x in gpt.shapes]
with ops.device(gv[0][0].device):
with ops.name_scope('unpack'):
splits = array_ops.split(gv[0], elt_widths)
unpacked_gv = []
for idx, s in enumerate(splits):
unpacked_gv.append((array_ops.reshape(s, gpt.shapes[idx]),
gpt.vars[idx]))
return unpacked_gv
def pack_small_tensors(replica_grads, max_bytes=0, max_group=0):
"""Concatenate small gradient tensors together for reduction.
Args:
replica_grads: List of lists of (gradient, variable) tuples.
max_bytes: Int giving max number of bytes in a tensor that
may be considered small.
max_group: Int giving max number of small tensors that may be
concatenated into one new tensor.
Returns:
new_replica_grads, packing where new_replica_grads is identical to
replica_grads except that all feasible small_tensors have been removed
from their places and concatenated into larger tensors that are
now in the front of the list for each replica, and packing contains
the data necessary to restore the replica_grads structure.
Look through the first replica for gradients of the same type (float),
and small size, that are all sequential. For each such group,
replace by a new tensor that is a flattened concatenation. Note
that the corresponding variable will be absent, which doesn't matter
because it isn't used during all-reduce.
Requires:
Every gv_list in replicas must have isomorphic structure including identical
tensor sizes and types.
"""
small_indices = []
large_indices = []
for idx, (g, _) in enumerate(replica_grads[0]):
if g.dtype == dtypes.float32 and (4 * g.shape.num_elements()) <= max_bytes:
small_indices.append(idx)
else:
large_indices.append(idx)
small_ranges, small_singles = extract_ranges(
small_indices, range_size_limit=max_group)
large_indices = sorted(large_indices + small_singles)
num_gv = len(replica_grads[0])
packing = {}
if small_ranges:
new_replica_grads = []
for dev_idx, gv_list in enumerate(replica_grads):
assert len(gv_list) == num_gv
new_gv_list = []
for r in small_ranges:
key = '%d:%d' % (dev_idx, len(new_gv_list))
new_gv_list.append((pack_range(key, packing, gv_list, r),
'packing_var_placeholder'))
for i in large_indices:
new_gv_list.append(gv_list[i])
new_replica_grads.append(new_gv_list)
return new_replica_grads, packing
else:
return replica_grads, None
def unpack_small_tensors(replica_grads, packing):
"""Undo the structure alterations to replica_grads done by pack_small_tensors.
Args:
replica_grads: List of List of (grad, var) tuples.
packing: A dict generated by pack_small_tensors describing the changes
it made to replica_grads.
Returns:
new_replica_grads: identical to replica_grads except that concatenations
of small tensors have been split apart and returned to their original
positions, paired with their original variables.
"""
if not packing:
return replica_grads
new_replica_grads = []
num_devices = len(replica_grads)
num_packed = len(packing.keys()) // num_devices
for dev_idx, gv_list in enumerate(replica_grads):
gv_list = list(gv_list)
new_gv_list = gv_list[num_packed:]
for i in range(num_packed):
k = '%d:%d' % (dev_idx, i)
gpt = packing[k]
gv = unpack_grad_tuple(gv_list[i], gpt)
for gi, idx in enumerate(gpt.indices):
assert idx == gpt.indices[gi]
new_gv_list.insert(idx, gv[gi])
new_replica_grads.append(new_gv_list)
return new_replica_grads
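# Illustrative sketch, not part of the original module: packing two small
# float32 gradients and unpacking them again restores the original list
# structure. The string variable placeholders are hypothetical; they are only
# carried along for later re-pairing, which is all this sketch needs.
def _example_pack_unpack_round_trip():
  g_small_a = array_ops.ones([4])    # 16 bytes, gets packed
  g_small_b = array_ops.ones([2])    # 8 bytes, gets packed
  g_large = array_ops.ones([1024])   # 4 KB, stays as-is
  replica_grads = [[(g_small_a, 'v0'), (g_small_b, 'v1'), (g_large, 'v2')]]
  packed, packing = pack_small_tensors(replica_grads, max_bytes=64,
                                       max_group=16)
  assert len(packed[0]) == 2         # one concatenated tensor + the large one
  unpacked = unpack_small_tensors(packed, packing)
  assert len(unpacked[0]) == 3
  return unpacked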
def aggregate_tensors_or_indexed_slices(values, accumulation_fn=math_ops.add_n):
"""Aggregate tensors using `accumulation_fn` and IndexedSlices via concat."""
if any(isinstance(v, ops.IndexedSlices) for v in values):
return gradients_impl._AggregateIndexedSlicesGradients(values) # pylint: disable=protected-access
else:
return accumulation_fn(values)
def divide_by_n_tensors_or_indexed_slices(value, n):
if isinstance(value, ops.IndexedSlices):
value = gradients_impl._HandleNestedIndexedSlices(value) # pylint: disable=protected-access
return ops.IndexedSlices(
value.values / n, value.indices, value.dense_shape)
else:
return value / n
def copy_tensor_or_indexed_slices_to_device(value, device):
with ops.device(device):
if isinstance(value, ops.IndexedSlices):
copied_values = array_ops.identity(value.values)
copied_indices = array_ops.identity(value.indices)
copied_shape = array_ops.identity(value.dense_shape)
result = ops.IndexedSlices(copied_values, copied_indices, copied_shape)
else:
result = array_ops.identity(value)
return result
def contains_indexed_slices(value):
"""Check whether the value is `IndexedSlices` or contains `IndexedSlices`."""
if isinstance(value, ops.IndexedSlices):
return True
elif isinstance(value, (list, tuple)) and value:
return any(contains_indexed_slices(v) for v in value)
elif isinstance(value, value_lib.DistributedValues):
return contains_indexed_slices(list(value._index.values())) # pylint: disable=protected-access
else:
return False
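# Illustrative sketch, not part of the original module: dense-only inputs take
# the add_n aggregation path, because none of them is an IndexedSlices.
def _example_aggregate_dense_values():
  values = [array_ops.ones([2]), array_ops.ones([2])]
  assert not contains_indexed_slices(values)
  return aggregate_tensors_or_indexed_slices(values)  # math_ops.add_n path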
|
export_environment
|
Exports an environment to a local folder. Hosts of the environment
are exported also.
@param env: The environment to export.
@type env: L{Environment}
@param path: Path to local directory.
@type path: string
|
# coding: utf-8
"""
Provides the exporter tool. The exporter can be used to export ComodIT entities
to local directories.
"""
from __future__ import print_function
from builtins import object
import os
from comodit_client.api.collection import EntityNotFoundException
from comodit_client.api.exceptions import PythonApiException
from comodit_client.api.host import Host
from comodit_client.rest.exceptions import ApiException
from comodit_client.util.path import ensure
import six
from comodit_client.api import orchestration
class ExportException(Exception):
"""
Exception raised by exporter in case of error.
"""
pass
class Export(object):
"""
The exporter is a tool that enables exporting entities to local
directories. Exported entities may later be (re-)imported (see L{Import}).
"""
def __init__(self, force = False):
"""
Creates an exporter instance. If force flag is set, all data already
present in a destination folder are overwritten on export.
@param force: If True, the force flag is set; it is not set otherwise.
@type force: bool
"""
self._force = force
def _export_files_content(self, entity, output_folder):
for template in entity.files():
file_name = template.name
try:
with open(os.path.join(output_folder, file_name), "w") as f:
if six.PY2:
f.write(template.read_content().encode('utf-8'))
else:
f.write(template.read_content())
except ApiException as e:
if e.code == 404:
pass
else:
raise e
def _export_entity(self, res, res_folder, export_files = False, export_thumb = False, backup = False):
if backup:
print("backup", res.name, "to", res_folder)
else:
print("exporting", res.name, "to", res_folder)
# Ensures local repository does not contain stale data
if(os.path.exists(res_folder) and len(os.listdir(res_folder)) > 0) and not self._force:
raise ExportException(res_folder + " already exists and is not empty.")
res.dump(res_folder)
if export_files:
# Dump files' content to disk
files_folder = os.path.join(res_folder, "files")
ensure(files_folder)
self._export_files_content(res, files_folder)
if export_thumb:
# Dump thumbnail to disk
try:
content = res.read_thumbnail_content()
with open(os.path.join(res_folder, "thumb"), "wb") as f:
f.write(content)
except ApiException as e:
if e.code == 404:
pass
else:
raise e
def export_application(self, app, path, backup = False):
"""
Exports an application to a local folder.
@param app: The application to export.
@type app: L{Application}
@param path: Path to local directory.
@type path: string
@param backup: indicates whether this export is a backup.
@type backup: bool
"""
self._export_entity(app, path, True, True, backup)
def export_distribution(self, dist, path, backup = False):
"""
Exports a distribution to a local folder.
@param dist: The distribution to export.
@type dist: L{Distribution}
@param path: Path to local directory.
@type path: string
@param backup: indicates whether this export is a backup.
@type backup: bool
"""
self._export_entity(dist, path, True, True, backup)
def export_platform(self, plat, path, backup = False):
"""
Exports a platform to a local folder.
@param plat: The platform to export.
@type plat: L{Platform}
@param path: Path to local directory.
@type path: string
@param backup: indicates whether this export is a backup.
@type backup: bool
"""
self._export_entity(plat, path, True, backup=backup)
# MASKED: export_environment function (lines 132-147)
def export_job(self, job, path):
"""
Exports a job to a local folder.
@param job: The job to export.
@type job: L{Job}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(job, path)
def export_orchestration(self, orchestration, path):
"""
Exports an orchestration to a local folder.
@param orchestration: The orchestration to export.
@type orchestration: L{Orchestration}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(orchestration, path)
def export_notification(self, notification, path):
"""
Exports a notification to a local folder.
@param notification: The notification to export.
@type notification: L{Notification}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(notification, path)
def export_host(self, host, path):
"""
Exports a host to a local folder. Contexts and instance are exported
also.
@param host: The host to export.
@type host: L{Host}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(host, path)
# Export instance
if host.state != Host.State.DEFINED:
try:
instance = host.get_instance()
instance.dump_json(os.path.join(path, "instance.json"))
except PythonApiException:
pass
# Export application contexts
app_folder = os.path.join(path, "applications")
ensure(app_folder)
for context in host.applications():
context.dump_json(os.path.join(app_folder, context.application + ".json"))
# Export platform context
try:
host.get_platform().dump_json(os.path.join(path, "platform.json"))
except EntityNotFoundException:
pass
# Export distribution context
try:
host.get_distribution().dump_json(os.path.join(path, "distribution.json"))
except EntityNotFoundException:
pass
def export_organization(self, org, path):
"""
Exports an organization to a local folder. Environments, applications,
distributions and platforms are exported also.
@param org: The organization to export.
@type org: L{Organization}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(org, path)
for app in org.applications():
self.export_application(app, os.path.join(path, "applications", app.name))
for dist in org.distributions():
self.export_distribution(dist, os.path.join(path, "distributions", dist.name))
for plat in org.platforms():
self.export_platform(plat, os.path.join(path, "platforms", plat.name))
for job in org.jobs():
self.export_job(job, os.path.join(path, "jobs", job.name))
for orch in org.orchestrations():
self.export_orchestration(orch, os.path.join(path, "orchestrations", orch.name))
for env in org.environments():
self.export_environment(env, os.path.join(path, "environments", env.name))
|
def export_environment(self, env, path):
"""
Exports an environment to a local folder. Hosts of the environment
are exported also.
@param env: The environment to export.
@type env: L{Environment}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(env, path)
hosts_folder = os.path.join(path, "hosts")
for host in env.hosts():
self.export_host(host, os.path.join(hosts_folder, host.name))
| 132 | 147 |
# coding: utf-8
"""
Provides the exporter tool. The exporter can be used to export ComodIT entities
to local directories.
"""
from __future__ import print_function
from builtins import object
import os
from comodit_client.api.collection import EntityNotFoundException
from comodit_client.api.exceptions import PythonApiException
from comodit_client.api.host import Host
from comodit_client.rest.exceptions import ApiException
from comodit_client.util.path import ensure
import six
from comodit_client.api import orchestration
class ExportException(Exception):
"""
Exception raised by exporter in case of error.
"""
pass
class Export(object):
"""
The exporter is a tool that enables exporting entities to local
directories. Exported entities may later be (re-)imported (see L{Import}).
"""
def __init__(self, force = False):
"""
Creates an exporter instance. If force flag is set, all data already
present in a destination folder are overwritten on export.
@param force: If True, the force flag is set; it is not set otherwise.
@type force: bool
"""
self._force = force
def _export_files_content(self, entity, output_folder):
for template in entity.files():
file_name = template.name
try:
with open(os.path.join(output_folder, file_name), "w") as f:
if six.PY2:
f.write(template.read_content().encode('utf-8'))
else:
f.write(template.read_content())
except ApiException as e:
if e.code == 404:
pass
else:
raise e
def _export_entity(self, res, res_folder, export_files = False, export_thumb = False, backup = False):
if backup:
print("backup", res.name, "to", res_folder)
else:
print("exporting", res.name, "to", res_folder)
# Ensures local repository does not contain stale data
if(os.path.exists(res_folder) and len(os.listdir(res_folder)) > 0) and not self._force:
raise ExportException(res_folder + " already exists and is not empty.")
res.dump(res_folder)
if export_files:
# Dump files' content to disk
files_folder = os.path.join(res_folder, "files")
ensure(files_folder)
self._export_files_content(res, files_folder)
if export_thumb:
# Dump thumbnail to disk
try:
content = res.read_thumbnail_content()
with open(os.path.join(res_folder, "thumb"), "wb") as f:
f.write(content)
except ApiException as e:
if e.code == 404:
pass
else:
raise e
def export_application(self, app, path, backup = False):
"""
Exports an application to a local folder.
@param app: The application to export.
@type app: L{Application}
@param path: Path to local directory.
@type path: string
@param backup: indicates whether this export is a backup.
@type backup: bool
"""
self._export_entity(app, path, True, True, backup)
def export_distribution(self, dist, path, backup = False):
"""
Exports a distribution to a local folder.
@param dist: The distribution to export.
@type dist: L{Distribution}
@param path: Path to local directory.
@type path: string
@param backup: indicates whether this export is a backup.
@type backup: bool
"""
self._export_entity(dist, path, True, True, backup)
def export_platform(self, plat, path, backup = False):
"""
Exports a platform to a local folder.
@param plat: The platform to export.
@type plat: L{Platform}
@param path: Path to local directory.
@type path: string
@param backup: indicates whether this export is a backup.
@type backup: bool
"""
self._export_entity(plat, path, True, backup=backup)
def export_environment(self, env, path):
"""
Exports an environment to a local folder. Hosts of the environment
are exported also.
@param env: The environment to export.
@type env: L{Environment}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(env, path)
hosts_folder = os.path.join(path, "hosts")
for host in env.hosts():
self.export_host(host, os.path.join(hosts_folder, host.name))
def export_job(self, job, path):
"""
Exports a job to a local folder.
@param job: The job to export.
@type job: L{Job}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(job, path)
def export_orchestration(self, orchestration, path):
"""
Exports an orchestration to a local folder.
@param orchestration: The orchestration to export.
@type orchestration: L{Orchestration}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(orchestration, path)
def export_notification(self, notification, path):
"""
Exports a notification to a local folder.
@param notification: The notification to export.
@type notification: L{Notification}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(notification, path)
def export_host(self, host, path):
"""
Exports a host to a local folder. Contexts and instance are exported
also.
@param host: The host to export.
@type host: L{Host}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(host, path)
# Export instance
if host.state != Host.State.DEFINED:
try:
instance = host.get_instance()
instance.dump_json(os.path.join(path, "instance.json"))
except PythonApiException:
pass
# Export application contexts
app_folder = os.path.join(path, "applications")
ensure(app_folder)
for context in host.applications():
context.dump_json(os.path.join(app_folder, context.application + ".json"))
# Export platform context
try:
host.get_platform().dump_json(os.path.join(path, "platform.json"))
except EntityNotFoundException:
pass
# Export distribution context
try:
host.get_distribution().dump_json(os.path.join(path, "distribution.json"))
except EntityNotFoundException:
pass
def export_organization(self, org, path):
"""
Exports an organization to a local folder. Environments, applications,
distributions and platforms are exported also.
@param org: The organization to export.
@type org: L{Organization}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(org, path)
for app in org.applications():
self.export_application(app, os.path.join(path, "applications", app.name))
for dist in org.distributions():
self.export_distribution(dist, os.path.join(path, "distributions", dist.name))
for plat in org.platforms():
self.export_platform(plat, os.path.join(path, "platforms", plat.name))
for job in org.jobs():
self.export_job(job, os.path.join(path, "jobs", job.name))
for orch in org.orchestrations():
self.export_orchestration(orch, os.path.join(path, "orchestrations", orch.name))
for env in org.environments():
self.export_environment(env, os.path.join(path, "environments", env.name))
|
export_host
|
Exports a host to a local folder. Contexts and instance are exported
also.
@param host: The host to export.
@type host: L{Host}
@param path: Path to local directory.
@type path: string
|
# coding: utf-8
"""
Provides the exporter tool. The exporter can be used to export ComodIT entities
to local directories.
"""
from __future__ import print_function
from builtins import object
import os
from comodit_client.api.collection import EntityNotFoundException
from comodit_client.api.exceptions import PythonApiException
from comodit_client.api.host import Host
from comodit_client.rest.exceptions import ApiException
from comodit_client.util.path import ensure
import six
from comodit_client.api import orchestration
class ExportException(Exception):
"""
Exception raised by exporter in case of error.
"""
pass
class Export(object):
"""
The exporter is a tool that enables exporting entities to local
directories. Exported entities may later be (re-)imported (see L{Import}).
"""
def __init__(self, force = False):
"""
Creates an exporter instance. If force flag is set, all data already
present in a destination folder are overwritten on export.
@param force: If True, the force flag is set; it is not set otherwise.
@type force: bool
"""
self._force = force
def _export_files_content(self, entity, output_folder):
for template in entity.files():
file_name = template.name
try:
with open(os.path.join(output_folder, file_name), "w") as f:
if six.PY2:
f.write(template.read_content().encode('utf-8'))
else:
f.write(template.read_content())
except ApiException as e:
if e.code == 404:
pass
else:
raise e
def _export_entity(self, res, res_folder, export_files = False, export_thumb = False, backup = False):
if backup:
print("backup", res.name, "to", res_folder)
else:
print("exporting", res.name, "to", res_folder)
# Ensures local repository does not contain stale data
if(os.path.exists(res_folder) and len(os.listdir(res_folder)) > 0) and not self._force:
raise ExportException(res_folder + " already exists and is not empty.")
res.dump(res_folder)
if export_files:
# Dump files' content to disk
files_folder = os.path.join(res_folder, "files")
ensure(files_folder)
self._export_files_content(res, files_folder)
if export_thumb:
# Dump thumbnail to disk
try:
content = res.read_thumbnail_content()
with open(os.path.join(res_folder, "thumb"), "wb") as f:
f.write(content)
except ApiException as e:
if e.code == 404:
pass
else:
raise e
def export_application(self, app, path, backup = False):
"""
Exports an application to a local folder.
@param app: The application to export.
@type app: L{Application}
@param path: Path to local directory.
@type path: string
@param backup: indicates whether this export is a backup.
@type backup: bool
"""
self._export_entity(app, path, True, True, backup)
def export_distribution(self, dist, path, backup = False):
"""
Exports a distribution to a local folder.
@param dist: The distribution to export.
@type dist: L{Distribution}
@param path: Path to local directory.
@type path: string
@param backup: indicates whether this export is a backup.
@type backup: bool
"""
self._export_entity(dist, path, True, True, backup)
def export_platform(self, plat, path, backup = False):
"""
Exports a platform to a local folder.
@param plat: The platform to export.
@type plat: L{Platform}
@param path: Path to local directory.
@type path: string
@param backup: indicates whether this export is a backup.
@type backup: bool
"""
self._export_entity(plat, path, True, backup=backup)
def export_environment(self, env, path):
"""
Exports an environment to a local folder. Hosts of the environment
are exported also.
@param env: The environment to export.
@type env: L{Environment}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(env, path)
hosts_folder = os.path.join(path, "hosts")
for host in env.hosts():
self.export_host(host, os.path.join(hosts_folder, host.name))
def export_job(self, job, path):
"""
Exports a job to a local folder.
@param job: The job to export.
@type job: L{Job}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(job, path)
def export_orchestration(self, orchestration, path):
"""
Exports an orchestration to a local folder.
@param orchestration: The orchestration to export.
@type orchestration: L{Orchestration}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(orchestration, path)
def export_notification(self, notification, path):
"""
Exports a notification to a local folder.
@param notification: The notification to export.
@type notification: L{Notification}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(notification, path)
# MASKED: export_host function (lines 185-222)
def export_organization(self, org, path):
"""
Exports an organization to a local folder. Environments, applications,
distributions and platforms are exported also.
@param org: The organization to export.
@type org: L{Organization}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(org, path)
for app in org.applications():
self.export_application(app, os.path.join(path, "applications", app.name))
for dist in org.distributions():
self.export_distribution(dist, os.path.join(path, "distributions", dist.name))
for plat in org.platforms():
self.export_platform(plat, os.path.join(path, "platforms", plat.name))
for job in org.jobs():
self.export_job(job, os.path.join(path, "jobs", job.name))
for orch in org.orchestrations():
self.export_orchestration(orch, os.path.join(path, "orchestrations", orch.name))
for env in org.environments():
self.export_environment(env, os.path.join(path, "environments", env.name))
|
def export_host(self, host, path):
"""
Exports a host to a local folder. Contexts and instance are exported
also.
@param host: The host to export.
@type host: L{Host}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(host, path)
# Export instance
if host.state != Host.State.DEFINED:
try:
instance = host.get_instance()
instance.dump_json(os.path.join(path, "instance.json"))
except PythonApiException:
pass
# Export application contexts
app_folder = os.path.join(path, "applications")
ensure(app_folder)
for context in host.applications():
context.dump_json(os.path.join(app_folder, context.application + ".json"))
# Export platform context
try:
host.get_platform().dump_json(os.path.join(path, "platform.json"))
except EntityNotFoundException:
pass
# Export distribution context
try:
host.get_distribution().dump_json(os.path.join(path, "distribution.json"))
except EntityNotFoundException:
pass
| 185 | 222 |
# coding: utf-8
"""
Provides the exporter tool. The exporter can be used to export ComodIT entities
to local directories.
"""
from __future__ import print_function
from builtins import object
import os
from comodit_client.api.collection import EntityNotFoundException
from comodit_client.api.exceptions import PythonApiException
from comodit_client.api.host import Host
from comodit_client.rest.exceptions import ApiException
from comodit_client.util.path import ensure
import six
from comodit_client.api import orchestration
class ExportException(Exception):
"""
Exception raised by exporter in case of error.
"""
pass
class Export(object):
"""
The exporter is a tool that enables exporting entities to local
directories. Exported entities may later be (re-)imported (see L{Import}).
"""
def __init__(self, force = False):
"""
Creates an exporter instance. If force flag is set, all data already
present in a destination folder are overwritten on export.
@param force: If True, the force flag is set; it is not set otherwise.
@type force: bool
"""
self._force = force
def _export_files_content(self, entity, output_folder):
for template in entity.files():
file_name = template.name
try:
with open(os.path.join(output_folder, file_name), "w") as f:
if six.PY2:
f.write(template.read_content().encode('utf-8'))
else:
f.write(template.read_content())
except ApiException as e:
if e.code == 404:
pass
else:
raise e
def _export_entity(self, res, res_folder, export_files = False, export_thumb = False, backup = False):
if backup:
print("backup", res.name, "to", res_folder)
else:
print("exporting", res.name, "to", res_folder)
# Ensures local repository does not contain stale data
if(os.path.exists(res_folder) and len(os.listdir(res_folder)) > 0) and not self._force:
raise ExportException(res_folder + " already exists and is not empty.")
res.dump(res_folder)
if export_files:
# Dump files' content to disk
files_folder = os.path.join(res_folder, "files")
ensure(files_folder)
self._export_files_content(res, files_folder)
if export_thumb:
# Dump thumbnail to disk
try:
content = res.read_thumbnail_content()
with open(os.path.join(res_folder, "thumb"), "wb") as f:
f.write(content)
except ApiException as e:
if e.code == 404:
pass
else:
raise e
def export_application(self, app, path, backup = False):
"""
Exports an application to a local folder.
@param app: The application to export.
@type app: L{Application}
@param path: Path to local directory.
@type path: string
@param backup: indicates whether this export is a backup.
@type backup: bool
"""
self._export_entity(app, path, True, True, backup)
def export_distribution(self, dist, path, backup = False):
"""
Exports a distribution to a local folder.
@param dist: The distribution to export.
@type dist: L{Distribution}
@param path: Path to local directory.
@type path: string
@param backup: indicates whether this export is a backup.
@type backup: bool
"""
self._export_entity(dist, path, True, True, backup)
def export_platform(self, plat, path, backup = False):
"""
Exports a platform to a local folder.
@param plat: The platform to export.
@type plat: L{Platform}
@param path: Path to local directory.
@type path: string
@param backup: indicates whether this export is a backup.
@type backup: bool
"""
self._export_entity(plat, path, True, backup=backup)
def export_environment(self, env, path):
"""
Exports an environment to a local folder. Hosts of the environment
are exported also.
@param env: The environment to export.
@type env: L{Environment}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(env, path)
hosts_folder = os.path.join(path, "hosts")
for host in env.hosts():
self.export_host(host, os.path.join(hosts_folder, host.name))
def export_job(self, job, path):
"""
Exports a job to a local folder.
@param job: The job to export.
@type job: L{Job}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(job, path)
def export_orchestration(self, orchestration, path):
"""
Exports an orchestration to a local folder.
@param orchestration: The orchestration to export.
@type orchestration: L{Orchestration}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(orchestration, path)
def export_notification(self, notification, path):
"""
Exports a notification to a local folder.
@param notification: The notification to export.
@type notification: L{Notification}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(notification, path)
def export_host(self, host, path):
"""
Exports a host to a local folder. Contexts and instance are exported
also.
@param host: The host to export.
@type host: L{Host}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(host, path)
# Export instance
if host.state != Host.State.DEFINED:
try:
instance = host.get_instance()
instance.dump_json(os.path.join(path, "instance.json"))
except PythonApiException:
pass
# Export application contexts
app_folder = os.path.join(path, "applications")
ensure(app_folder)
for context in host.applications():
context.dump_json(os.path.join(app_folder, context.application + ".json"))
# Export platform context
try:
host.get_platform().dump_json(os.path.join(path, "platform.json"))
except EntityNotFoundException:
pass
# Export distribution context
try:
host.get_distribution().dump_json(os.path.join(path, "distribution.json"))
except EntityNotFoundException:
pass
def export_organization(self, org, path):
"""
Exports an organization to a local folder. Environments, applications,
distributions and platforms are exported also.
@param org: The organization to export.
@type org: L{Organization}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(org, path)
for app in org.applications():
self.export_application(app, os.path.join(path, "applications", app.name))
for dist in org.distributions():
self.export_distribution(dist, os.path.join(path, "distributions", dist.name))
for plat in org.platforms():
self.export_platform(plat, os.path.join(path, "platforms", plat.name))
for job in org.jobs():
self.export_job(job, os.path.join(path, "jobs", job.name))
for orch in org.orchestrations():
self.export_orchestration(orch, os.path.join(path, "orchestrations", orch.name))
for env in org.environments():
self.export_environment(env, os.path.join(path, "environments", env.name))
|
export_organization
|
Exports an organization to a local folder. Environments, applications,
distributions and platforms are exported also.
@param org: The organization to export.
@type org: L{Organization}
@param path: Path to local directory.
@type path: string
|
# coding: utf-8
"""
Provides the exporter tool. The exporter can be used to export ComodIT entities
to local directories.
"""
from __future__ import print_function
from builtins import object
import os
from comodit_client.api.collection import EntityNotFoundException
from comodit_client.api.exceptions import PythonApiException
from comodit_client.api.host import Host
from comodit_client.rest.exceptions import ApiException
from comodit_client.util.path import ensure
import six
from comodit_client.api import orchestration
class ExportException(Exception):
"""
Exception raised by exporter in case of error.
"""
pass
class Export(object):
"""
The exporter is a tool that enables exporting entities to local
directories. Exported entities may later be (re-)imported (see L{Import}).
"""
def __init__(self, force = False):
"""
Creates an exporter instance. If force flag is set, all data already
present in a destination folder are overwritten on export.
@param force: If True, the force flag is set; it is not set otherwise.
@type force: bool
"""
self._force = force
def _export_files_content(self, entity, output_folder):
for template in entity.files():
file_name = template.name
try:
with open(os.path.join(output_folder, file_name), "w") as f:
if six.PY2:
f.write(template.read_content().encode('utf-8'))
else:
f.write(template.read_content())
except ApiException as e:
if e.code == 404:
pass
else:
raise e
def _export_entity(self, res, res_folder, export_files = False, export_thumb = False, backup = False):
if backup:
print("backup", res.name, "to", res_folder)
else:
print("exporting", res.name, "to", res_folder)
# Ensures local repository does not contain stale data
if(os.path.exists(res_folder) and len(os.listdir(res_folder)) > 0) and not self._force:
raise ExportException(res_folder + " already exists and is not empty.")
res.dump(res_folder)
if export_files:
# Dump files' content to disk
files_folder = os.path.join(res_folder, "files")
ensure(files_folder)
self._export_files_content(res, files_folder)
if export_thumb:
# Dump thumbnail to disk
try:
content = res.read_thumbnail_content()
with open(os.path.join(res_folder, "thumb"), "wb") as f:
f.write(content)
except ApiException as e:
if e.code == 404:
pass
else:
raise e
def export_application(self, app, path, backup = False):
"""
Exports an application to a local folder.
@param app: The application to export.
@type app: L{Application}
@param path: Path to local directory.
@type path: string
@param backup: indicates whether this export is a backup.
@type backup: bool
"""
self._export_entity(app, path, True, True, backup)
def export_distribution(self, dist, path, backup = False):
"""
Exports a distribution to a local folder.
@param dist: The distribution to export.
@type dist: L{Distribution}
@param path: Path to local directory.
@type path: string
@param backup: indicates whether this export is a backup.
@type backup: bool
"""
self._export_entity(dist, path, True, True, backup)
def export_platform(self, plat, path, backup = False):
"""
Exports a platform to a local folder.
@param plat: The platform to export.
@type plat: L{Platform}
@param path: Path to local directory.
@type path: string
@param backup: indicates whether this export is a backup.
@type backup: bool
"""
self._export_entity(plat, path, True, backup=backup)
def export_environment(self, env, path):
"""
Exports an environment to a local folder. Hosts of the environment
are exported also.
@param env: The environment to export.
@type env: L{Environment}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(env, path)
hosts_folder = os.path.join(path, "hosts")
for host in env.hosts():
self.export_host(host, os.path.join(hosts_folder, host.name))
def export_job(self, job, path):
"""
Exports a job to a local folder.
@param job: The job to export.
@type job: L{Job}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(job, path)
def export_orchestration(self, orchestration, path):
"""
Exports an orchestration to a local folder.
@param orchestration: The orchestration to export.
@type orchestration: L{Orchestration}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(orchestration, path)
def export_notification(self, notification, path):
"""
Exports a notification to a local folder.
@param notification: The notification to export.
@type notification: L{Notification}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(notification, path)
def export_host(self, host, path):
"""
Exports a host to a local folder. Contexts and instance are exported
also.
@param host: The host to export.
@type host: L{Host}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(host, path)
# Export instance
if host.state != Host.State.DEFINED:
try:
instance = host.get_instance()
instance.dump_json(os.path.join(path, "instance.json"))
except PythonApiException:
pass
# Export application contexts
app_folder = os.path.join(path, "applications")
ensure(app_folder)
for context in host.applications():
context.dump_json(os.path.join(app_folder, context.application + ".json"))
# Export platform context
try:
host.get_platform().dump_json(os.path.join(path, "platform.json"))
except EntityNotFoundException:
pass
# Export distribution context
try:
host.get_distribution().dump_json(os.path.join(path, "distribution.json"))
except EntityNotFoundException:
pass
# MASKED: export_organization function (lines 224-253)
|
def export_organization(self, org, path):
"""
Exports an organization to a local folder. Environments, applications,
distributions and platforms are exported also.
@param org: The organization to export.
@type org: L{Organization}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(org, path)
for app in org.applications():
self.export_application(app, os.path.join(path, "applications", app.name))
for dist in org.distributions():
self.export_distribution(dist, os.path.join(path, "distributions", dist.name))
for plat in org.platforms():
self.export_platform(plat, os.path.join(path, "platforms", plat.name))
for job in org.jobs():
self.export_job(job, os.path.join(path, "jobs", job.name))
for orch in org.orchestrations():
self.export_orchestration(orch, os.path.join(path, "orchestrations", orch.name))
for env in org.environments():
self.export_environment(env, os.path.join(path, "environments", env.name))
| 224 | 253 |
# coding: utf-8
"""
Provides the exporter tool. The exporter can be used to export ComodIT entities
to local directories.
"""
from __future__ import print_function
from builtins import object
import os
from comodit_client.api.collection import EntityNotFoundException
from comodit_client.api.exceptions import PythonApiException
from comodit_client.api.host import Host
from comodit_client.rest.exceptions import ApiException
from comodit_client.util.path import ensure
import six
from comodit_client.api import orchestration
class ExportException(Exception):
"""
Exception raised by exporter in case of error.
"""
pass
class Export(object):
"""
The exporter is a tool that enables exporting entities to local
directories. Exported entities may later be (re-)imported (see L{Import}).
"""
def __init__(self, force = False):
"""
Creates an exporter instance. If force flag is set, all data already
present in a destination folder are overwritten on export.
@param force: If True, the force flag is set; it is not set otherwise.
@type force: bool
"""
self._force = force
def _export_files_content(self, entity, output_folder):
for template in entity.files():
file_name = template.name
try:
with open(os.path.join(output_folder, file_name), "w") as f:
if six.PY2:
f.write(template.read_content().encode('utf-8'))
else:
f.write(template.read_content())
except ApiException as e:
if e.code == 404:
pass
else:
raise e
def _export_entity(self, res, res_folder, export_files = False, export_thumb = False, backup = False):
if backup:
print("backup", res.name, "to", res_folder)
else:
print("exporting", res.name, "to", res_folder)
# Ensures local repository does not contain stale data
if(os.path.exists(res_folder) and len(os.listdir(res_folder)) > 0) and not self._force:
raise ExportException(res_folder + " already exists and is not empty.")
res.dump(res_folder)
if export_files:
# Dump files' content to disk
files_folder = os.path.join(res_folder, "files")
ensure(files_folder)
self._export_files_content(res, files_folder)
if export_thumb:
# Dump thumbnail to disk
try:
content = res.read_thumbnail_content()
with open(os.path.join(res_folder, "thumb"), "wb") as f:
f.write(content)
except ApiException as e:
if e.code == 404:
pass
else:
raise e
def export_application(self, app, path, backup = False):
"""
Exports an application to a local folder.
@param app: The application to export.
@type app: L{Application}
@param path: Path to local directory.
@type path: string
@param backup: indicates whether this export is a backup.
@type backup: bool
"""
self._export_entity(app, path, True, True, backup)
def export_distribution(self, dist, path, backup = False):
"""
Exports a distribution to a local folder.
@param dist: The distribution to export.
@type dist: L{Distribution}
@param path: Path to local directory.
@type path: string
@param backup: indicates whether this export is a backup.
@type backup: bool
"""
self._export_entity(dist, path, True, True, backup)
def export_platform(self, plat, path, backup = False):
"""
Exports a platform to a local folder.
@param plat: The platform to export.
@type plat: L{Platform}
@param path: Path to local directory.
@type path: string
@param backup: indicates whether this export is a backup.
@type backup: bool
"""
self._export_entity(plat, path, True, backup=backup)
def export_environment(self, env, path):
"""
Exports an environment to a local folder. Hosts of the environment
are exported also.
@param env: The environment to export.
@type env: L{Environment}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(env, path)
hosts_folder = os.path.join(path, "hosts")
for host in env.hosts():
self.export_host(host, os.path.join(hosts_folder, host.name))
def export_job(self, job, path):
"""
Exports a job to a local folder.
@param job: The job to export.
@type job: L{Job}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(job, path)
def export_orchestration(self, orchestration, path):
"""
Exports an orchestration to a local folder.
@param orchestration: The orchestration to export.
@type orchestration: L{Orchestration}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(orchestration, path)
def export_notification(self, notification, path):
"""
Exports a notification to a local folder.
@param notification: The notification to export.
@type notification: L{Notification}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(notification, path)
def export_host(self, host, path):
"""
Exports a host to a local folder. Contexts and instance are exported
also.
@param host: The host to export.
@type host: L{Host}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(host, path)
# Export instance
if host.state != Host.State.DEFINED:
try:
instance = host.get_instance()
instance.dump_json(os.path.join(path, "instance.json"))
except PythonApiException:
pass
# Export application contexts
app_folder = os.path.join(path, "applications")
ensure(app_folder)
for context in host.applications():
context.dump_json(os.path.join(app_folder, context.application + ".json"))
# Export platform context
try:
host.get_platform().dump_json(os.path.join(path, "platform.json"))
except EntityNotFoundException:
pass
# Export distribution context
try:
host.get_distribution().dump_json(os.path.join(path, "distribution.json"))
except EntityNotFoundException:
pass
def export_organization(self, org, path):
"""
Exports an organization to a local folder. Environments, applications,
distributions and platforms are exported also.
@param org: The organization to export.
@type org: L{Organization}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(org, path)
for app in org.applications():
self.export_application(app, os.path.join(path, "applications", app.name))
for dist in org.distributions():
self.export_distribution(dist, os.path.join(path, "distributions", dist.name))
for plat in org.platforms():
self.export_platform(plat, os.path.join(path, "platforms", plat.name))
for job in org.jobs():
self.export_job(job, os.path.join(path, "jobs", job.name))
for orch in org.orchestrations():
self.export_orchestration(orch, os.path.join(path, "orchestrations", orch.name))
for env in org.environments():
self.export_environment(env, os.path.join(path, "environments", env.name))
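# Illustrative usage sketch, not part of the original module: given an
# already-retrieved Organization entity (how it is obtained from the ComodIT
# API is outside this sketch), a full export with overwrite enabled could look
# like the helper below. The helper name and default path are hypothetical.
def _example_export_organization(org, path="/tmp/my-org-export"):
    exporter = Export(force=True)
    exporter.export_organization(org, path)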
|
_ensure_tf_install
|
Attempt to import tensorflow, and ensure its version is sufficient.
Raises:
ImportError: if either tensorflow is not importable or its version is
inadequate.
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Init module for TensorFlow Model Optimization Python API.
```
import tensorflow_model_optimization as tfmot
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# We need to put some imports inside a function call below, and the function
# call needs to come before the *actual* imports that populate the
# tensorflow_model_optimization namespace. Hence, we disable this lint check
# throughout the file.
#
# pylint: disable=g-import-not-at-top
# Ensure TensorFlow is importable and its version is sufficiently recent. This
# needs to happen before anything else, since the imports below will try to
# import tensorflow, too.
# MASKED: _ensure_tf_install function (lines 38-71)
_ensure_tf_install()
import inspect as _inspect
import os as _os
import sys as _sys
# To ensure users only access the expected public API, the API structure is
# created in the `api` directory. Import all api modules.
# pylint: disable=wildcard-import
from tensorflow_model_optimization.python.core.api import *
# pylint: enable=wildcard-import
# Use sparsity module to fetch the path for the `api` directory.
# This handles all techniques, not just sparsity.
_API_MODULE = sparsity # pylint: disable=undefined-variable
# Returns $(install_dir)/tensorflow_model_optimization/api
_sparsity_api_dir = _os.path.dirname(
_os.path.dirname(_inspect.getfile(_API_MODULE)))
# Add the `api` directory to `__path__` so that `from * import module` works.
_current_module = _sys.modules[__name__]
if not hasattr(_current_module, '__path__'):
__path__ = [_sparsity_api_dir]
elif _os.path.dirname(_inspect.getfile(_API_MODULE)) not in __path__:
__path__.append(_sparsity_api_dir)
# Delete python module so that users only access the code using the API path
# rather than using the code directory structure.
# This will disallow usage such as `tfmot.python.core.sparsity.keras`.
# pylint: disable=undefined-variable
try:
del python
except NameError:
pass
# pylint: enable=undefined-variable
|
def _ensure_tf_install(): # pylint: disable=g-statement-before-imports
"""Attempt to import tensorflow, and ensure its version is sufficient.
Raises:
ImportError: if either tensorflow is not importable or its version is
inadequate.
"""
try:
import tensorflow as tf
except ImportError:
# Print more informative error message, then reraise.
print(
'\n\nFailed to import TensorFlow. Please note that TensorFlow is not '
'installed by default when you install TensorFlow Model Optimization. This '
'is so that users can decide whether to install the GPU-enabled '
'TensorFlow package. To use TensorFlow Model Optimization, please install '
'the most recent version of TensorFlow, by following instructions at '
'https://tensorflow.org/install.\n\n')
raise
import distutils.version
#
# Update this whenever we need to depend on a newer TensorFlow release.
#
required_tensorflow_version = '1.14.0'
if (distutils.version.LooseVersion(tf.version.VERSION) <
distutils.version.LooseVersion(required_tensorflow_version)):
raise ImportError(
'This version of TensorFlow Model Optimization requires TensorFlow '
'version >= {required}; Detected an installation of version {present}. '
'Please upgrade TensorFlow to proceed.'.format(
required=required_tensorflow_version, present=tf.__version__))
| 38 | 71 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Init module for TensorFlow Model Optimization Python API.
```
import tensorflow_model_optimization as tfmot
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# We need to put some imports inside a function call below, and the function
# call needs to come before the *actual* imports that populate the
# tensorflow_model_optimization namespace. Hence, we disable this lint check
# throughout the file.
#
# pylint: disable=g-import-not-at-top
# Ensure TensorFlow is importable and its version is sufficiently recent. This
# needs to happen before anything else, since the imports below will try to
# import tensorflow, too.
def _ensure_tf_install(): # pylint: disable=g-statement-before-imports
"""Attempt to import tensorflow, and ensure its version is sufficient.
Raises:
ImportError: if either tensorflow is not importable or its version is
inadequate.
"""
try:
import tensorflow as tf
except ImportError:
# Print more informative error message, then reraise.
print(
'\n\nFailed to import TensorFlow. Please note that TensorFlow is not '
'installed by default when you install TensorFlow Model Optimization. This '
'is so that users can decide whether to install the GPU-enabled '
'TensorFlow package. To use TensorFlow Model Optimization, please install '
'the most recent version of TensorFlow, by following instructions at '
'https://tensorflow.org/install.\n\n')
raise
import distutils.version
#
# Update this whenever we need to depend on a newer TensorFlow release.
#
required_tensorflow_version = '1.14.0'
if (distutils.version.LooseVersion(tf.version.VERSION) <
distutils.version.LooseVersion(required_tensorflow_version)):
raise ImportError(
'This version of TensorFlow Model Optimization requires TensorFlow '
'version >= {required}; Detected an installation of version {present}. '
'Please upgrade TensorFlow to proceed.'.format(
required=required_tensorflow_version, present=tf.__version__))
_ensure_tf_install()
import inspect as _inspect
import os as _os
import sys as _sys
# To ensure users only access the expected public API, the API structure is
# created in the `api` directory. Import all api modules.
# pylint: disable=wildcard-import
from tensorflow_model_optimization.python.core.api import *
# pylint: enable=wildcard-import
# Use sparsity module to fetch the path for the `api` directory.
# This handles all techniques, not just sparsity.
_API_MODULE = sparsity # pylint: disable=undefined-variable
# Returns $(install_dir)/tensorflow_model_optimization/api
_sparsity_api_dir = _os.path.dirname(
_os.path.dirname(_inspect.getfile(_API_MODULE)))
# Add the `api` directory to `__path__` so that submodules under `api` resolve as `tensorflow_model_optimization.<module>`.
_current_module = _sys.modules[__name__]
if not hasattr(_current_module, '__path__'):
__path__ = [_sparsity_api_dir]
elif _os.path.dirname(_inspect.getfile(_API_MODULE)) not in __path__:
__path__.append(_sparsity_api_dir)
# Delete python module so that users only access the code using the API path
# rather than using the code directory structure.
# This will disallow usage such as `tfmot.python.core.sparsity.keras`.
# pylint: disable=undefined-variable
try:
del python
except NameError:
pass
# pylint: enable=undefined-variable
|
_try_cast
|
Convert input to numpy ndarray and optionally cast to a given dtype.
Parameters
----------
arr : ndarray, scalar, list, tuple, iterator (catchall)
Excludes: ExtensionArray, Series, Index.
dtype : np.dtype, ExtensionDtype or None
copy : bool
If False, don't copy the data if not needed.
raise_cast_failure : bool
If True, and if a dtype is specified, raise errors during casting.
Otherwise an object array is returned.
|
"""
Constructor functions intended to be shared by pd.array, Series.__init__,
and Index.__new__.
These should not depend on core.internals.
"""
from __future__ import annotations
from collections import abc
from typing import TYPE_CHECKING, Any, Optional, Sequence, Union, cast
import numpy as np
import numpy.ma as ma
from pandas._libs import lib
from pandas._libs.tslibs import IncompatibleFrequency, OutOfBoundsDatetime
from pandas._typing import AnyArrayLike, ArrayLike, Dtype, DtypeObj
from pandas.core.dtypes.base import ExtensionDtype, registry
from pandas.core.dtypes.cast import (
construct_1d_arraylike_from_scalar,
construct_1d_ndarray_preserving_na,
construct_1d_object_array_from_listlike,
infer_dtype_from_scalar,
maybe_cast_to_datetime,
maybe_cast_to_integer_array,
maybe_castable,
maybe_convert_platform,
maybe_upcast,
)
from pandas.core.dtypes.common import (
is_datetime64_ns_dtype,
is_extension_array_dtype,
is_float_dtype,
is_integer_dtype,
is_iterator,
is_list_like,
is_object_dtype,
is_sparse,
is_string_dtype,
is_timedelta64_ns_dtype,
)
from pandas.core.dtypes.generic import (
ABCExtensionArray,
ABCIndexClass,
ABCPandasArray,
ABCSeries,
)
from pandas.core.dtypes.missing import isna
import pandas.core.common as com
if TYPE_CHECKING:
from pandas import ExtensionArray, Index, Series
def array(
data: Union[Sequence[object], AnyArrayLike],
dtype: Optional[Dtype] = None,
copy: bool = True,
) -> ExtensionArray:
"""
Create an array.
.. versionadded:: 0.24.0
Parameters
----------
data : Sequence of objects
The scalars inside `data` should be instances of the
scalar type for `dtype`. It's expected that `data`
represents a 1-dimensional array of data.
When `data` is an Index or Series, the underlying array
will be extracted from `data`.
dtype : str, np.dtype, or ExtensionDtype, optional
The dtype to use for the array. This may be a NumPy
dtype or an extension type registered with pandas using
:meth:`pandas.api.extensions.register_extension_dtype`.
If not specified, there are two possibilities:
1. When `data` is a :class:`Series`, :class:`Index`, or
:class:`ExtensionArray`, the `dtype` will be taken
from the data.
2. Otherwise, pandas will attempt to infer the `dtype`
from the data.
Note that when `data` is a NumPy array, ``data.dtype`` is
*not* used for inferring the array type. This is because
NumPy cannot represent all the types of data that can be
held in extension arrays.
Currently, pandas will infer an extension dtype for sequences of
============================== =====================================
Scalar Type Array Type
============================== =====================================
:class:`pandas.Interval` :class:`pandas.arrays.IntervalArray`
:class:`pandas.Period` :class:`pandas.arrays.PeriodArray`
:class:`datetime.datetime` :class:`pandas.arrays.DatetimeArray`
:class:`datetime.timedelta` :class:`pandas.arrays.TimedeltaArray`
:class:`int` :class:`pandas.arrays.IntegerArray`
:class:`float` :class:`pandas.arrays.FloatingArray`
:class:`str` :class:`pandas.arrays.StringArray`
:class:`bool` :class:`pandas.arrays.BooleanArray`
============================== =====================================
For all other cases, NumPy's usual inference rules will be used.
.. versionchanged:: 1.0.0
Pandas infers nullable-integer dtype for integer data,
string dtype for string data, and nullable-boolean dtype
for boolean data.
.. versionchanged:: 1.2.0
Pandas now also infers nullable-floating dtype for float-like
input data
copy : bool, default True
Whether to copy the data, even if not necessary. Depending
on the type of `data`, creating the new array may require
copying data, even if ``copy=False``.
Returns
-------
ExtensionArray
The newly created array.
Raises
------
ValueError
When `data` is not 1-dimensional.
See Also
--------
numpy.array : Construct a NumPy array.
Series : Construct a pandas Series.
Index : Construct a pandas Index.
arrays.PandasArray : ExtensionArray wrapping a NumPy array.
Series.array : Extract the array stored within a Series.
Notes
-----
Omitting the `dtype` argument means pandas will attempt to infer the
best array type from the values in the data. As new array types are
added by pandas and 3rd party libraries, the "best" array type may
change. We recommend specifying `dtype` to ensure that
1. the correct array type for the data is returned
2. the returned array type doesn't change as new extension types
are added by pandas and third-party libraries
Additionally, if the underlying memory representation of the returned
array matters, we recommend specifying the `dtype` as a concrete object
rather than a string alias or allowing it to be inferred. For example,
a future version of pandas or a 3rd-party library may include a
dedicated ExtensionArray for string data. In this event, the following
would no longer return a :class:`arrays.PandasArray` backed by a NumPy
array.
>>> pd.array(['a', 'b'], dtype=str)
<PandasArray>
['a', 'b']
Length: 2, dtype: str32
This would instead return the new ExtensionArray dedicated for string
data. If you really need the new array to be backed by a NumPy array,
specify that in the dtype.
>>> pd.array(['a', 'b'], dtype=np.dtype("<U1"))
<PandasArray>
['a', 'b']
Length: 2, dtype: str32
Finally, Pandas has arrays that mostly overlap with NumPy
* :class:`arrays.DatetimeArray`
* :class:`arrays.TimedeltaArray`
When data with a ``datetime64[ns]`` or ``timedelta64[ns]`` dtype is
passed, pandas will always return a ``DatetimeArray`` or ``TimedeltaArray``
rather than a ``PandasArray``. This is for symmetry with the case of
timezone-aware data, which NumPy does not natively support.
>>> pd.array(['2015', '2016'], dtype='datetime64[ns]')
<DatetimeArray>
['2015-01-01 00:00:00', '2016-01-01 00:00:00']
Length: 2, dtype: datetime64[ns]
>>> pd.array(["1H", "2H"], dtype='timedelta64[ns]')
<TimedeltaArray>
['0 days 01:00:00', '0 days 02:00:00']
Length: 2, dtype: timedelta64[ns]
Examples
--------
If a dtype is not specified, pandas will infer the best dtype from the values.
See the description of `dtype` for the types pandas infers for.
>>> pd.array([1, 2])
<IntegerArray>
[1, 2]
Length: 2, dtype: Int64
>>> pd.array([1, 2, np.nan])
<IntegerArray>
[1, 2, <NA>]
Length: 3, dtype: Int64
>>> pd.array([1.1, 2.2])
<FloatingArray>
[1.1, 2.2]
Length: 2, dtype: Float64
>>> pd.array(["a", None, "c"])
<StringArray>
['a', <NA>, 'c']
Length: 3, dtype: string
>>> pd.array([pd.Period('2000', freq="D"), pd.Period("2000", freq="D")])
<PeriodArray>
['2000-01-01', '2000-01-01']
Length: 2, dtype: period[D]
You can use the string alias for `dtype`
>>> pd.array(['a', 'b', 'a'], dtype='category')
['a', 'b', 'a']
Categories (2, object): ['a', 'b']
Or specify the actual dtype
>>> pd.array(['a', 'b', 'a'],
... dtype=pd.CategoricalDtype(['a', 'b', 'c'], ordered=True))
['a', 'b', 'a']
Categories (3, object): ['a' < 'b' < 'c']
If pandas does not infer a dedicated extension type a
:class:`arrays.PandasArray` is returned.
>>> pd.array([1 + 1j, 3 + 2j])
<PandasArray>
[(1+1j), (3+2j)]
Length: 2, dtype: complex128
As mentioned in the "Notes" section, new extension types may be added
in the future (by pandas or 3rd party libraries), causing the return
value to no longer be a :class:`arrays.PandasArray`. Specify the `dtype`
as a NumPy dtype if you need to ensure there's no future change in
behavior.
>>> pd.array([1, 2], dtype=np.dtype("int32"))
<PandasArray>
[1, 2]
Length: 2, dtype: int32
`data` must be 1-dimensional. A ValueError is raised when the input
has the wrong dimensionality.
>>> pd.array(1)
Traceback (most recent call last):
...
ValueError: Cannot pass scalar '1' to 'pandas.array'.
"""
from pandas.core.arrays import (
BooleanArray,
DatetimeArray,
FloatingArray,
IntegerArray,
IntervalArray,
PandasArray,
StringArray,
TimedeltaArray,
period_array,
)
if lib.is_scalar(data):
msg = f"Cannot pass scalar '{data}' to 'pandas.array'."
raise ValueError(msg)
if dtype is None and isinstance(
data, (ABCSeries, ABCIndexClass, ABCExtensionArray)
):
dtype = data.dtype
data = extract_array(data, extract_numpy=True)
# this returns None for not-found dtypes.
if isinstance(dtype, str):
dtype = registry.find(dtype) or dtype
if is_extension_array_dtype(dtype):
cls = cast(ExtensionDtype, dtype).construct_array_type()
return cls._from_sequence(data, dtype=dtype, copy=copy)
if dtype is None:
inferred_dtype = lib.infer_dtype(data, skipna=True)
if inferred_dtype == "period":
try:
return period_array(data, copy=copy)
except IncompatibleFrequency:
# We may have a mixture of frequencies.
# We choose to return an ndarray, rather than raising.
pass
elif inferred_dtype == "interval":
try:
return IntervalArray(data, copy=copy)
except ValueError:
# We may have a mixture of `closed` here.
# We choose to return an ndarray, rather than raising.
pass
elif inferred_dtype.startswith("datetime"):
# datetime, datetime64
try:
return DatetimeArray._from_sequence(data, copy=copy)
except ValueError:
# Mixture of timezones, fall back to PandasArray
pass
elif inferred_dtype.startswith("timedelta"):
# timedelta, timedelta64
return TimedeltaArray._from_sequence(data, copy=copy)
elif inferred_dtype == "string":
return StringArray._from_sequence(data, copy=copy)
elif inferred_dtype == "integer":
return IntegerArray._from_sequence(data, copy=copy)
elif inferred_dtype in ("floating", "mixed-integer-float"):
return FloatingArray._from_sequence(data, copy=copy)
elif inferred_dtype == "boolean":
return BooleanArray._from_sequence(data, copy=copy)
# Pandas overrides NumPy for
# 1. datetime64[ns]
# 2. timedelta64[ns]
# so that a DatetimeArray is returned.
if is_datetime64_ns_dtype(dtype):
return DatetimeArray._from_sequence(data, dtype=dtype, copy=copy)
elif is_timedelta64_ns_dtype(dtype):
return TimedeltaArray._from_sequence(data, dtype=dtype, copy=copy)
result = PandasArray._from_sequence(data, dtype=dtype, copy=copy)
return result
def extract_array(obj: AnyArrayLike, extract_numpy: bool = False) -> ArrayLike:
"""
Extract the ndarray or ExtensionArray from a Series or Index.
For all other types, `obj` is just returned as is.
Parameters
----------
obj : object
For Series / Index, the underlying ExtensionArray is unboxed.
For Numpy-backed ExtensionArrays, the ndarray is extracted.
extract_numpy : bool, default False
Whether to extract the ndarray from a PandasArray
Returns
-------
arr : object
Examples
--------
>>> extract_array(pd.Series(['a', 'b', 'c'], dtype='category'))
['a', 'b', 'c']
Categories (3, object): ['a', 'b', 'c']
Other objects like lists, arrays, and DataFrames are just passed through.
>>> extract_array([1, 2, 3])
[1, 2, 3]
For an ndarray-backed Series / Index a PandasArray is returned.
>>> extract_array(pd.Series([1, 2, 3]))
<PandasArray>
[1, 2, 3]
Length: 3, dtype: int64
To extract all the way down to the ndarray, pass ``extract_numpy=True``.
>>> extract_array(pd.Series([1, 2, 3]), extract_numpy=True)
array([1, 2, 3])
"""
if isinstance(obj, (ABCIndexClass, ABCSeries)):
obj = obj.array
if extract_numpy and isinstance(obj, ABCPandasArray):
obj = obj.to_numpy()
# error: Incompatible return value type (got "Index", expected "ExtensionArray")
# error: Incompatible return value type (got "Series", expected "ExtensionArray")
return obj # type: ignore[return-value]
def sanitize_array(
data,
index: Optional[Index],
dtype: Optional[DtypeObj] = None,
copy: bool = False,
raise_cast_failure: bool = False,
) -> ArrayLike:
"""
Sanitize input data to an ndarray or ExtensionArray, copy if specified,
coerce to the dtype if specified.
"""
if isinstance(data, ma.MaskedArray):
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = maybe_upcast(data, copy=True)
data.soften_mask() # set hardmask False if it was True
data[mask] = fill_value
else:
data = data.copy()
# extract ndarray or ExtensionArray, ensure we have no PandasArray
data = extract_array(data, extract_numpy=True)
# GH#846
if isinstance(data, np.ndarray):
if dtype is not None and is_float_dtype(data.dtype) and is_integer_dtype(dtype):
# possibility of nan -> garbage
try:
subarr = _try_cast(data, dtype, copy, True)
except ValueError:
if copy:
subarr = data.copy()
else:
subarr = np.array(data, copy=False)
else:
            # we will try to copy by definition here
subarr = _try_cast(data, dtype, copy, raise_cast_failure)
elif isinstance(data, ABCExtensionArray):
# it is already ensured above this is not a PandasArray
subarr = data
if dtype is not None:
subarr = subarr.astype(dtype, copy=copy)
elif copy:
subarr = subarr.copy()
return subarr
elif isinstance(data, (list, tuple, abc.Set, abc.ValuesView)) and len(data) > 0:
if isinstance(data, set):
# Raise only for unordered sets, e.g., not for dict_keys
raise TypeError("Set type is unordered")
data = list(data)
if dtype is not None:
subarr = _try_cast(data, dtype, copy, raise_cast_failure)
else:
subarr = maybe_convert_platform(data)
subarr = maybe_cast_to_datetime(subarr, dtype)
elif isinstance(data, range):
# GH#16804
arr = np.arange(data.start, data.stop, data.step, dtype="int64")
subarr = _try_cast(arr, dtype, copy, raise_cast_failure)
elif lib.is_scalar(data) and index is not None and dtype is not None:
data = maybe_cast_to_datetime(data, dtype)
if not lib.is_scalar(data):
data = data[0]
subarr = construct_1d_arraylike_from_scalar(data, len(index), dtype)
else:
subarr = _try_cast(data, dtype, copy, raise_cast_failure)
# scalar like, GH
if getattr(subarr, "ndim", 0) == 0:
if isinstance(data, list): # pragma: no cover
subarr = np.array(data, dtype=object)
elif index is not None:
value = data
# figure out the dtype from the value (upcast if necessary)
if dtype is None:
dtype, value = infer_dtype_from_scalar(value, pandas_dtype=True)
else:
# need to possibly convert the value here
value = maybe_cast_to_datetime(value, dtype)
subarr = construct_1d_arraylike_from_scalar(value, len(index), dtype)
else:
return subarr.item()
# the result that we want
elif subarr.ndim == 1:
if index is not None:
# a 1-element ndarray
if len(subarr) != len(index) and len(subarr) == 1:
subarr = construct_1d_arraylike_from_scalar(
subarr[0], len(index), subarr.dtype
)
elif subarr.ndim > 1:
if isinstance(data, np.ndarray):
raise ValueError("Data must be 1-dimensional")
else:
subarr = com.asarray_tuplesafe(data, dtype=dtype)
if not (is_extension_array_dtype(subarr.dtype) or is_extension_array_dtype(dtype)):
        # This is to prevent a mixed-type Series from being cast entirely to
# NumPy string type, e.g. NaN --> '-1#IND'.
if issubclass(subarr.dtype.type, str):
# GH#16605
# If not empty convert the data to dtype
# GH#19853: If data is a scalar, subarr has already the result
if not lib.is_scalar(data):
if not np.all(isna(data)):
data = np.array(data, dtype=dtype, copy=False)
subarr = np.array(data, dtype=object, copy=copy)
is_object_or_str_dtype = is_object_dtype(dtype) or is_string_dtype(dtype)
if is_object_dtype(subarr.dtype) and not is_object_or_str_dtype:
inferred = lib.infer_dtype(subarr, skipna=False)
if inferred in {"interval", "period"}:
subarr = array(subarr)
return subarr
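One observable effect of the `range` branch above, sketched through the public Series constructor (which routes list-like input through sanitize_array); the resulting dtype follows directly from the explicit `dtype="int64"` used in that branch:
import pandas as pd

s = pd.Series(range(3))
print(s.dtype)  # int64: the range is materialized with np.arange(..., dtype="int64")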
# MASKED: _try_cast function (lines 538-593)
def is_empty_data(data: Any) -> bool:
"""
Utility to check if a Series is instantiated with empty data,
which does not contain dtype information.
Parameters
----------
data : array-like, Iterable, dict, or scalar value
Contains data stored in Series.
Returns
-------
bool
"""
is_none = data is None
is_list_like_without_dtype = is_list_like(data) and not hasattr(data, "dtype")
is_simple_empty = is_list_like_without_dtype and not data
return is_none or is_simple_empty
def create_series_with_explicit_dtype(
data: Any = None,
index: Optional[Union[ArrayLike, Index]] = None,
dtype: Optional[Dtype] = None,
name: Optional[str] = None,
copy: bool = False,
fastpath: bool = False,
dtype_if_empty: Dtype = object,
) -> Series:
"""
Helper to pass an explicit dtype when instantiating an empty Series.
This silences a DeprecationWarning described in GitHub-17261.
Parameters
----------
data : Mirrored from Series.__init__
index : Mirrored from Series.__init__
dtype : Mirrored from Series.__init__
name : Mirrored from Series.__init__
copy : Mirrored from Series.__init__
fastpath : Mirrored from Series.__init__
dtype_if_empty : str, numpy.dtype, or ExtensionDtype
        This dtype will be passed explicitly when an empty Series is
        instantiated.
Returns
-------
Series
"""
from pandas.core.series import Series
if is_empty_data(data) and dtype is None:
dtype = dtype_if_empty
return Series(
data=data, index=index, dtype=dtype, name=name, copy=copy, fastpath=fastpath
)
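A small sketch of the helper's behavior, read straight off the code above: empty, dtype-less input falls back to `dtype_if_empty`, while real data keeps its normally inferred dtype. The import path is an assumption (this is an internal helper and its location depends on the pandas version):
import pandas as pd
from pandas.core.construction import create_series_with_explicit_dtype  # internal helper

print(create_series_with_explicit_dtype(None).dtype)       # object (dtype_if_empty)
print(create_series_with_explicit_dtype([1, 2, 3]).dtype)  # int64 (inferred as usual)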
|
def _try_cast(arr, dtype: Optional[DtypeObj], copy: bool, raise_cast_failure: bool):
"""
Convert input to numpy ndarray and optionally cast to a given dtype.
Parameters
----------
arr : ndarray, scalar, list, tuple, iterator (catchall)
Excludes: ExtensionArray, Series, Index.
dtype : np.dtype, ExtensionDtype or None
copy : bool
If False, don't copy the data if not needed.
raise_cast_failure : bool
If True, and if a dtype is specified, raise errors during casting.
Otherwise an object array is returned.
"""
# perf shortcut as this is the most common case
if isinstance(arr, np.ndarray):
if maybe_castable(arr) and not copy and dtype is None:
return arr
if isinstance(dtype, ExtensionDtype) and (dtype.kind != "M" or is_sparse(dtype)):
# create an extension array from its dtype
# DatetimeTZ case needs to go through maybe_cast_to_datetime but
# SparseDtype does not
array_type = dtype.construct_array_type()._from_sequence
subarr = array_type(arr, dtype=dtype, copy=copy)
return subarr
try:
# GH#15832: Check if we are requesting a numeric dtype and
# that we can convert the data to the requested dtype.
if is_integer_dtype(dtype):
# this will raise if we have e.g. floats
maybe_cast_to_integer_array(arr, dtype)
subarr = arr
else:
subarr = maybe_cast_to_datetime(arr, dtype)
# Take care in creating object arrays (but iterators are not
# supported):
if is_object_dtype(dtype) and (
is_list_like(subarr)
and not (is_iterator(subarr) or isinstance(subarr, np.ndarray))
):
subarr = construct_1d_object_array_from_listlike(subarr)
elif not is_extension_array_dtype(subarr):
subarr = construct_1d_ndarray_preserving_na(subarr, dtype, copy=copy)
except OutOfBoundsDatetime:
# in case of out of bound datetime64 -> always raise
raise
except (ValueError, TypeError):
if dtype is not None and raise_cast_failure:
raise
else:
subarr = np.array(arr, dtype=object, copy=copy)
return subarr
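A sketch of the cast-failure behavior documented above, calling the private helper directly; the import path is an assumption (internal API, pandas-version dependent):
import numpy as np
from pandas.core.construction import _try_cast  # internal helper

# Strings cannot be cast to int64; with raise_cast_failure=False the helper
# falls back to an object ndarray instead of raising.
out = _try_cast(["a", "b"], np.dtype("int64"), copy=False, raise_cast_failure=False)
print(out.dtype)  # object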
| 538 | 593 |
"""
Constructor functions intended to be shared by pd.array, Series.__init__,
and Index.__new__.
These should not depend on core.internals.
"""
from __future__ import annotations
from collections import abc
from typing import TYPE_CHECKING, Any, Optional, Sequence, Union, cast
import numpy as np
import numpy.ma as ma
from pandas._libs import lib
from pandas._libs.tslibs import IncompatibleFrequency, OutOfBoundsDatetime
from pandas._typing import AnyArrayLike, ArrayLike, Dtype, DtypeObj
from pandas.core.dtypes.base import ExtensionDtype, registry
from pandas.core.dtypes.cast import (
construct_1d_arraylike_from_scalar,
construct_1d_ndarray_preserving_na,
construct_1d_object_array_from_listlike,
infer_dtype_from_scalar,
maybe_cast_to_datetime,
maybe_cast_to_integer_array,
maybe_castable,
maybe_convert_platform,
maybe_upcast,
)
from pandas.core.dtypes.common import (
is_datetime64_ns_dtype,
is_extension_array_dtype,
is_float_dtype,
is_integer_dtype,
is_iterator,
is_list_like,
is_object_dtype,
is_sparse,
is_string_dtype,
is_timedelta64_ns_dtype,
)
from pandas.core.dtypes.generic import (
ABCExtensionArray,
ABCIndexClass,
ABCPandasArray,
ABCSeries,
)
from pandas.core.dtypes.missing import isna
import pandas.core.common as com
if TYPE_CHECKING:
from pandas import ExtensionArray, Index, Series
def array(
data: Union[Sequence[object], AnyArrayLike],
dtype: Optional[Dtype] = None,
copy: bool = True,
) -> ExtensionArray:
"""
Create an array.
.. versionadded:: 0.24.0
Parameters
----------
data : Sequence of objects
The scalars inside `data` should be instances of the
scalar type for `dtype`. It's expected that `data`
represents a 1-dimensional array of data.
When `data` is an Index or Series, the underlying array
will be extracted from `data`.
dtype : str, np.dtype, or ExtensionDtype, optional
The dtype to use for the array. This may be a NumPy
dtype or an extension type registered with pandas using
:meth:`pandas.api.extensions.register_extension_dtype`.
If not specified, there are two possibilities:
1. When `data` is a :class:`Series`, :class:`Index`, or
:class:`ExtensionArray`, the `dtype` will be taken
from the data.
2. Otherwise, pandas will attempt to infer the `dtype`
from the data.
Note that when `data` is a NumPy array, ``data.dtype`` is
*not* used for inferring the array type. This is because
NumPy cannot represent all the types of data that can be
held in extension arrays.
Currently, pandas will infer an extension dtype for sequences of
============================== =====================================
Scalar Type Array Type
============================== =====================================
:class:`pandas.Interval` :class:`pandas.arrays.IntervalArray`
:class:`pandas.Period` :class:`pandas.arrays.PeriodArray`
:class:`datetime.datetime` :class:`pandas.arrays.DatetimeArray`
:class:`datetime.timedelta` :class:`pandas.arrays.TimedeltaArray`
:class:`int` :class:`pandas.arrays.IntegerArray`
:class:`float` :class:`pandas.arrays.FloatingArray`
:class:`str` :class:`pandas.arrays.StringArray`
:class:`bool` :class:`pandas.arrays.BooleanArray`
============================== =====================================
For all other cases, NumPy's usual inference rules will be used.
.. versionchanged:: 1.0.0
Pandas infers nullable-integer dtype for integer data,
string dtype for string data, and nullable-boolean dtype
for boolean data.
.. versionchanged:: 1.2.0
Pandas now also infers nullable-floating dtype for float-like
input data
copy : bool, default True
Whether to copy the data, even if not necessary. Depending
on the type of `data`, creating the new array may require
copying data, even if ``copy=False``.
Returns
-------
ExtensionArray
The newly created array.
Raises
------
ValueError
When `data` is not 1-dimensional.
See Also
--------
numpy.array : Construct a NumPy array.
Series : Construct a pandas Series.
Index : Construct a pandas Index.
arrays.PandasArray : ExtensionArray wrapping a NumPy array.
Series.array : Extract the array stored within a Series.
Notes
-----
Omitting the `dtype` argument means pandas will attempt to infer the
best array type from the values in the data. As new array types are
added by pandas and 3rd party libraries, the "best" array type may
change. We recommend specifying `dtype` to ensure that
1. the correct array type for the data is returned
2. the returned array type doesn't change as new extension types
are added by pandas and third-party libraries
Additionally, if the underlying memory representation of the returned
array matters, we recommend specifying the `dtype` as a concrete object
rather than a string alias or allowing it to be inferred. For example,
a future version of pandas or a 3rd-party library may include a
dedicated ExtensionArray for string data. In this event, the following
would no longer return a :class:`arrays.PandasArray` backed by a NumPy
array.
>>> pd.array(['a', 'b'], dtype=str)
<PandasArray>
['a', 'b']
Length: 2, dtype: str32
This would instead return the new ExtensionArray dedicated for string
data. If you really need the new array to be backed by a NumPy array,
specify that in the dtype.
>>> pd.array(['a', 'b'], dtype=np.dtype("<U1"))
<PandasArray>
['a', 'b']
Length: 2, dtype: str32
Finally, Pandas has arrays that mostly overlap with NumPy
* :class:`arrays.DatetimeArray`
* :class:`arrays.TimedeltaArray`
When data with a ``datetime64[ns]`` or ``timedelta64[ns]`` dtype is
passed, pandas will always return a ``DatetimeArray`` or ``TimedeltaArray``
rather than a ``PandasArray``. This is for symmetry with the case of
timezone-aware data, which NumPy does not natively support.
>>> pd.array(['2015', '2016'], dtype='datetime64[ns]')
<DatetimeArray>
['2015-01-01 00:00:00', '2016-01-01 00:00:00']
Length: 2, dtype: datetime64[ns]
>>> pd.array(["1H", "2H"], dtype='timedelta64[ns]')
<TimedeltaArray>
['0 days 01:00:00', '0 days 02:00:00']
Length: 2, dtype: timedelta64[ns]
Examples
--------
If a dtype is not specified, pandas will infer the best dtype from the values.
See the description of `dtype` for the types pandas infers for.
>>> pd.array([1, 2])
<IntegerArray>
[1, 2]
Length: 2, dtype: Int64
>>> pd.array([1, 2, np.nan])
<IntegerArray>
[1, 2, <NA>]
Length: 3, dtype: Int64
>>> pd.array([1.1, 2.2])
<FloatingArray>
[1.1, 2.2]
Length: 2, dtype: Float64
>>> pd.array(["a", None, "c"])
<StringArray>
['a', <NA>, 'c']
Length: 3, dtype: string
>>> pd.array([pd.Period('2000', freq="D"), pd.Period("2000", freq="D")])
<PeriodArray>
['2000-01-01', '2000-01-01']
Length: 2, dtype: period[D]
You can use the string alias for `dtype`
>>> pd.array(['a', 'b', 'a'], dtype='category')
['a', 'b', 'a']
Categories (2, object): ['a', 'b']
Or specify the actual dtype
>>> pd.array(['a', 'b', 'a'],
... dtype=pd.CategoricalDtype(['a', 'b', 'c'], ordered=True))
['a', 'b', 'a']
Categories (3, object): ['a' < 'b' < 'c']
If pandas does not infer a dedicated extension type a
:class:`arrays.PandasArray` is returned.
>>> pd.array([1 + 1j, 3 + 2j])
<PandasArray>
[(1+1j), (3+2j)]
Length: 2, dtype: complex128
As mentioned in the "Notes" section, new extension types may be added
in the future (by pandas or 3rd party libraries), causing the return
value to no longer be a :class:`arrays.PandasArray`. Specify the `dtype`
as a NumPy dtype if you need to ensure there's no future change in
behavior.
>>> pd.array([1, 2], dtype=np.dtype("int32"))
<PandasArray>
[1, 2]
Length: 2, dtype: int32
`data` must be 1-dimensional. A ValueError is raised when the input
has the wrong dimensionality.
>>> pd.array(1)
Traceback (most recent call last):
...
ValueError: Cannot pass scalar '1' to 'pandas.array'.
"""
from pandas.core.arrays import (
BooleanArray,
DatetimeArray,
FloatingArray,
IntegerArray,
IntervalArray,
PandasArray,
StringArray,
TimedeltaArray,
period_array,
)
if lib.is_scalar(data):
msg = f"Cannot pass scalar '{data}' to 'pandas.array'."
raise ValueError(msg)
if dtype is None and isinstance(
data, (ABCSeries, ABCIndexClass, ABCExtensionArray)
):
dtype = data.dtype
data = extract_array(data, extract_numpy=True)
# this returns None for not-found dtypes.
if isinstance(dtype, str):
dtype = registry.find(dtype) or dtype
if is_extension_array_dtype(dtype):
cls = cast(ExtensionDtype, dtype).construct_array_type()
return cls._from_sequence(data, dtype=dtype, copy=copy)
if dtype is None:
inferred_dtype = lib.infer_dtype(data, skipna=True)
if inferred_dtype == "period":
try:
return period_array(data, copy=copy)
except IncompatibleFrequency:
# We may have a mixture of frequencies.
# We choose to return an ndarray, rather than raising.
pass
elif inferred_dtype == "interval":
try:
return IntervalArray(data, copy=copy)
except ValueError:
# We may have a mixture of `closed` here.
# We choose to return an ndarray, rather than raising.
pass
elif inferred_dtype.startswith("datetime"):
# datetime, datetime64
try:
return DatetimeArray._from_sequence(data, copy=copy)
except ValueError:
# Mixture of timezones, fall back to PandasArray
pass
elif inferred_dtype.startswith("timedelta"):
# timedelta, timedelta64
return TimedeltaArray._from_sequence(data, copy=copy)
elif inferred_dtype == "string":
return StringArray._from_sequence(data, copy=copy)
elif inferred_dtype == "integer":
return IntegerArray._from_sequence(data, copy=copy)
elif inferred_dtype in ("floating", "mixed-integer-float"):
return FloatingArray._from_sequence(data, copy=copy)
elif inferred_dtype == "boolean":
return BooleanArray._from_sequence(data, copy=copy)
# Pandas overrides NumPy for
# 1. datetime64[ns]
# 2. timedelta64[ns]
# so that a DatetimeArray is returned.
if is_datetime64_ns_dtype(dtype):
return DatetimeArray._from_sequence(data, dtype=dtype, copy=copy)
elif is_timedelta64_ns_dtype(dtype):
return TimedeltaArray._from_sequence(data, dtype=dtype, copy=copy)
result = PandasArray._from_sequence(data, dtype=dtype, copy=copy)
return result
def extract_array(obj: AnyArrayLike, extract_numpy: bool = False) -> ArrayLike:
"""
Extract the ndarray or ExtensionArray from a Series or Index.
For all other types, `obj` is just returned as is.
Parameters
----------
obj : object
For Series / Index, the underlying ExtensionArray is unboxed.
For Numpy-backed ExtensionArrays, the ndarray is extracted.
extract_numpy : bool, default False
Whether to extract the ndarray from a PandasArray
Returns
-------
arr : object
Examples
--------
>>> extract_array(pd.Series(['a', 'b', 'c'], dtype='category'))
['a', 'b', 'c']
Categories (3, object): ['a', 'b', 'c']
Other objects like lists, arrays, and DataFrames are just passed through.
>>> extract_array([1, 2, 3])
[1, 2, 3]
For an ndarray-backed Series / Index a PandasArray is returned.
>>> extract_array(pd.Series([1, 2, 3]))
<PandasArray>
[1, 2, 3]
Length: 3, dtype: int64
To extract all the way down to the ndarray, pass ``extract_numpy=True``.
>>> extract_array(pd.Series([1, 2, 3]), extract_numpy=True)
array([1, 2, 3])
"""
if isinstance(obj, (ABCIndexClass, ABCSeries)):
obj = obj.array
if extract_numpy and isinstance(obj, ABCPandasArray):
obj = obj.to_numpy()
# error: Incompatible return value type (got "Index", expected "ExtensionArray")
# error: Incompatible return value type (got "Series", expected "ExtensionArray")
return obj # type: ignore[return-value]
def sanitize_array(
data,
index: Optional[Index],
dtype: Optional[DtypeObj] = None,
copy: bool = False,
raise_cast_failure: bool = False,
) -> ArrayLike:
"""
Sanitize input data to an ndarray or ExtensionArray, copy if specified,
coerce to the dtype if specified.
"""
if isinstance(data, ma.MaskedArray):
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = maybe_upcast(data, copy=True)
data.soften_mask() # set hardmask False if it was True
data[mask] = fill_value
else:
data = data.copy()
# extract ndarray or ExtensionArray, ensure we have no PandasArray
data = extract_array(data, extract_numpy=True)
# GH#846
if isinstance(data, np.ndarray):
if dtype is not None and is_float_dtype(data.dtype) and is_integer_dtype(dtype):
# possibility of nan -> garbage
try:
subarr = _try_cast(data, dtype, copy, True)
except ValueError:
if copy:
subarr = data.copy()
else:
subarr = np.array(data, copy=False)
else:
            # we will try to copy by definition here
subarr = _try_cast(data, dtype, copy, raise_cast_failure)
elif isinstance(data, ABCExtensionArray):
# it is already ensured above this is not a PandasArray
subarr = data
if dtype is not None:
subarr = subarr.astype(dtype, copy=copy)
elif copy:
subarr = subarr.copy()
return subarr
elif isinstance(data, (list, tuple, abc.Set, abc.ValuesView)) and len(data) > 0:
if isinstance(data, set):
# Raise only for unordered sets, e.g., not for dict_keys
raise TypeError("Set type is unordered")
data = list(data)
if dtype is not None:
subarr = _try_cast(data, dtype, copy, raise_cast_failure)
else:
subarr = maybe_convert_platform(data)
subarr = maybe_cast_to_datetime(subarr, dtype)
elif isinstance(data, range):
# GH#16804
arr = np.arange(data.start, data.stop, data.step, dtype="int64")
subarr = _try_cast(arr, dtype, copy, raise_cast_failure)
elif lib.is_scalar(data) and index is not None and dtype is not None:
data = maybe_cast_to_datetime(data, dtype)
if not lib.is_scalar(data):
data = data[0]
subarr = construct_1d_arraylike_from_scalar(data, len(index), dtype)
else:
subarr = _try_cast(data, dtype, copy, raise_cast_failure)
# scalar like, GH
if getattr(subarr, "ndim", 0) == 0:
if isinstance(data, list): # pragma: no cover
subarr = np.array(data, dtype=object)
elif index is not None:
value = data
# figure out the dtype from the value (upcast if necessary)
if dtype is None:
dtype, value = infer_dtype_from_scalar(value, pandas_dtype=True)
else:
# need to possibly convert the value here
value = maybe_cast_to_datetime(value, dtype)
subarr = construct_1d_arraylike_from_scalar(value, len(index), dtype)
else:
return subarr.item()
# the result that we want
elif subarr.ndim == 1:
if index is not None:
# a 1-element ndarray
if len(subarr) != len(index) and len(subarr) == 1:
subarr = construct_1d_arraylike_from_scalar(
subarr[0], len(index), subarr.dtype
)
elif subarr.ndim > 1:
if isinstance(data, np.ndarray):
raise ValueError("Data must be 1-dimensional")
else:
subarr = com.asarray_tuplesafe(data, dtype=dtype)
if not (is_extension_array_dtype(subarr.dtype) or is_extension_array_dtype(dtype)):
        # This is to prevent a mixed-type Series from being cast entirely to
# NumPy string type, e.g. NaN --> '-1#IND'.
if issubclass(subarr.dtype.type, str):
# GH#16605
# If not empty convert the data to dtype
# GH#19853: If data is a scalar, subarr has already the result
if not lib.is_scalar(data):
if not np.all(isna(data)):
data = np.array(data, dtype=dtype, copy=False)
subarr = np.array(data, dtype=object, copy=copy)
is_object_or_str_dtype = is_object_dtype(dtype) or is_string_dtype(dtype)
if is_object_dtype(subarr.dtype) and not is_object_or_str_dtype:
inferred = lib.infer_dtype(subarr, skipna=False)
if inferred in {"interval", "period"}:
subarr = array(subarr)
return subarr
def _try_cast(arr, dtype: Optional[DtypeObj], copy: bool, raise_cast_failure: bool):
"""
Convert input to numpy ndarray and optionally cast to a given dtype.
Parameters
----------
arr : ndarray, scalar, list, tuple, iterator (catchall)
Excludes: ExtensionArray, Series, Index.
dtype : np.dtype, ExtensionDtype or None
copy : bool
If False, don't copy the data if not needed.
raise_cast_failure : bool
If True, and if a dtype is specified, raise errors during casting.
Otherwise an object array is returned.
"""
# perf shortcut as this is the most common case
if isinstance(arr, np.ndarray):
if maybe_castable(arr) and not copy and dtype is None:
return arr
if isinstance(dtype, ExtensionDtype) and (dtype.kind != "M" or is_sparse(dtype)):
# create an extension array from its dtype
# DatetimeTZ case needs to go through maybe_cast_to_datetime but
# SparseDtype does not
array_type = dtype.construct_array_type()._from_sequence
subarr = array_type(arr, dtype=dtype, copy=copy)
return subarr
try:
# GH#15832: Check if we are requesting a numeric dtype and
# that we can convert the data to the requested dtype.
if is_integer_dtype(dtype):
# this will raise if we have e.g. floats
maybe_cast_to_integer_array(arr, dtype)
subarr = arr
else:
subarr = maybe_cast_to_datetime(arr, dtype)
# Take care in creating object arrays (but iterators are not
# supported):
if is_object_dtype(dtype) and (
is_list_like(subarr)
and not (is_iterator(subarr) or isinstance(subarr, np.ndarray))
):
subarr = construct_1d_object_array_from_listlike(subarr)
elif not is_extension_array_dtype(subarr):
subarr = construct_1d_ndarray_preserving_na(subarr, dtype, copy=copy)
except OutOfBoundsDatetime:
# in case of out of bound datetime64 -> always raise
raise
except (ValueError, TypeError):
if dtype is not None and raise_cast_failure:
raise
else:
subarr = np.array(arr, dtype=object, copy=copy)
return subarr
def is_empty_data(data: Any) -> bool:
"""
Utility to check if a Series is instantiated with empty data,
which does not contain dtype information.
Parameters
----------
data : array-like, Iterable, dict, or scalar value
Contains data stored in Series.
Returns
-------
bool
"""
is_none = data is None
is_list_like_without_dtype = is_list_like(data) and not hasattr(data, "dtype")
is_simple_empty = is_list_like_without_dtype and not data
return is_none or is_simple_empty
def create_series_with_explicit_dtype(
data: Any = None,
index: Optional[Union[ArrayLike, Index]] = None,
dtype: Optional[Dtype] = None,
name: Optional[str] = None,
copy: bool = False,
fastpath: bool = False,
dtype_if_empty: Dtype = object,
) -> Series:
"""
Helper to pass an explicit dtype when instantiating an empty Series.
This silences a DeprecationWarning described in GitHub-17261.
Parameters
----------
data : Mirrored from Series.__init__
index : Mirrored from Series.__init__
dtype : Mirrored from Series.__init__
name : Mirrored from Series.__init__
copy : Mirrored from Series.__init__
fastpath : Mirrored from Series.__init__
dtype_if_empty : str, numpy.dtype, or ExtensionDtype
        This dtype will be passed explicitly when an empty Series is
        instantiated.
Returns
-------
Series
"""
from pandas.core.series import Series
if is_empty_data(data) and dtype is None:
dtype = dtype_if_empty
return Series(
data=data, index=index, dtype=dtype, name=name, copy=copy, fastpath=fastpath
)
|
is_empty_data
|
Utility to check if a Series is instantiated with empty data,
which does not contain dtype information.
Parameters
----------
data : array-like, Iterable, dict, or scalar value
Contains data stored in Series.
Returns
-------
bool
|
"""
Constructor functions intended to be shared by pd.array, Series.__init__,
and Index.__new__.
These should not depend on core.internals.
"""
from __future__ import annotations
from collections import abc
from typing import TYPE_CHECKING, Any, Optional, Sequence, Union, cast
import numpy as np
import numpy.ma as ma
from pandas._libs import lib
from pandas._libs.tslibs import IncompatibleFrequency, OutOfBoundsDatetime
from pandas._typing import AnyArrayLike, ArrayLike, Dtype, DtypeObj
from pandas.core.dtypes.base import ExtensionDtype, registry
from pandas.core.dtypes.cast import (
construct_1d_arraylike_from_scalar,
construct_1d_ndarray_preserving_na,
construct_1d_object_array_from_listlike,
infer_dtype_from_scalar,
maybe_cast_to_datetime,
maybe_cast_to_integer_array,
maybe_castable,
maybe_convert_platform,
maybe_upcast,
)
from pandas.core.dtypes.common import (
is_datetime64_ns_dtype,
is_extension_array_dtype,
is_float_dtype,
is_integer_dtype,
is_iterator,
is_list_like,
is_object_dtype,
is_sparse,
is_string_dtype,
is_timedelta64_ns_dtype,
)
from pandas.core.dtypes.generic import (
ABCExtensionArray,
ABCIndexClass,
ABCPandasArray,
ABCSeries,
)
from pandas.core.dtypes.missing import isna
import pandas.core.common as com
if TYPE_CHECKING:
from pandas import ExtensionArray, Index, Series
def array(
data: Union[Sequence[object], AnyArrayLike],
dtype: Optional[Dtype] = None,
copy: bool = True,
) -> ExtensionArray:
"""
Create an array.
.. versionadded:: 0.24.0
Parameters
----------
data : Sequence of objects
The scalars inside `data` should be instances of the
scalar type for `dtype`. It's expected that `data`
represents a 1-dimensional array of data.
When `data` is an Index or Series, the underlying array
will be extracted from `data`.
dtype : str, np.dtype, or ExtensionDtype, optional
The dtype to use for the array. This may be a NumPy
dtype or an extension type registered with pandas using
:meth:`pandas.api.extensions.register_extension_dtype`.
If not specified, there are two possibilities:
1. When `data` is a :class:`Series`, :class:`Index`, or
:class:`ExtensionArray`, the `dtype` will be taken
from the data.
2. Otherwise, pandas will attempt to infer the `dtype`
from the data.
Note that when `data` is a NumPy array, ``data.dtype`` is
*not* used for inferring the array type. This is because
NumPy cannot represent all the types of data that can be
held in extension arrays.
Currently, pandas will infer an extension dtype for sequences of
============================== =====================================
Scalar Type Array Type
============================== =====================================
:class:`pandas.Interval` :class:`pandas.arrays.IntervalArray`
:class:`pandas.Period` :class:`pandas.arrays.PeriodArray`
:class:`datetime.datetime` :class:`pandas.arrays.DatetimeArray`
:class:`datetime.timedelta` :class:`pandas.arrays.TimedeltaArray`
:class:`int` :class:`pandas.arrays.IntegerArray`
:class:`float` :class:`pandas.arrays.FloatingArray`
:class:`str` :class:`pandas.arrays.StringArray`
:class:`bool` :class:`pandas.arrays.BooleanArray`
============================== =====================================
For all other cases, NumPy's usual inference rules will be used.
.. versionchanged:: 1.0.0
Pandas infers nullable-integer dtype for integer data,
string dtype for string data, and nullable-boolean dtype
for boolean data.
.. versionchanged:: 1.2.0
Pandas now also infers nullable-floating dtype for float-like
input data
copy : bool, default True
Whether to copy the data, even if not necessary. Depending
on the type of `data`, creating the new array may require
copying data, even if ``copy=False``.
Returns
-------
ExtensionArray
The newly created array.
Raises
------
ValueError
When `data` is not 1-dimensional.
See Also
--------
numpy.array : Construct a NumPy array.
Series : Construct a pandas Series.
Index : Construct a pandas Index.
arrays.PandasArray : ExtensionArray wrapping a NumPy array.
Series.array : Extract the array stored within a Series.
Notes
-----
Omitting the `dtype` argument means pandas will attempt to infer the
best array type from the values in the data. As new array types are
added by pandas and 3rd party libraries, the "best" array type may
change. We recommend specifying `dtype` to ensure that
1. the correct array type for the data is returned
2. the returned array type doesn't change as new extension types
are added by pandas and third-party libraries
Additionally, if the underlying memory representation of the returned
array matters, we recommend specifying the `dtype` as a concrete object
rather than a string alias or allowing it to be inferred. For example,
a future version of pandas or a 3rd-party library may include a
dedicated ExtensionArray for string data. In this event, the following
would no longer return a :class:`arrays.PandasArray` backed by a NumPy
array.
>>> pd.array(['a', 'b'], dtype=str)
<PandasArray>
['a', 'b']
Length: 2, dtype: str32
This would instead return the new ExtensionArray dedicated for string
data. If you really need the new array to be backed by a NumPy array,
specify that in the dtype.
>>> pd.array(['a', 'b'], dtype=np.dtype("<U1"))
<PandasArray>
['a', 'b']
Length: 2, dtype: str32
Finally, Pandas has arrays that mostly overlap with NumPy
* :class:`arrays.DatetimeArray`
* :class:`arrays.TimedeltaArray`
When data with a ``datetime64[ns]`` or ``timedelta64[ns]`` dtype is
passed, pandas will always return a ``DatetimeArray`` or ``TimedeltaArray``
rather than a ``PandasArray``. This is for symmetry with the case of
timezone-aware data, which NumPy does not natively support.
>>> pd.array(['2015', '2016'], dtype='datetime64[ns]')
<DatetimeArray>
['2015-01-01 00:00:00', '2016-01-01 00:00:00']
Length: 2, dtype: datetime64[ns]
>>> pd.array(["1H", "2H"], dtype='timedelta64[ns]')
<TimedeltaArray>
['0 days 01:00:00', '0 days 02:00:00']
Length: 2, dtype: timedelta64[ns]
Examples
--------
If a dtype is not specified, pandas will infer the best dtype from the values.
See the description of `dtype` for the types pandas infers for.
>>> pd.array([1, 2])
<IntegerArray>
[1, 2]
Length: 2, dtype: Int64
>>> pd.array([1, 2, np.nan])
<IntegerArray>
[1, 2, <NA>]
Length: 3, dtype: Int64
>>> pd.array([1.1, 2.2])
<FloatingArray>
[1.1, 2.2]
Length: 2, dtype: Float64
>>> pd.array(["a", None, "c"])
<StringArray>
['a', <NA>, 'c']
Length: 3, dtype: string
>>> pd.array([pd.Period('2000', freq="D"), pd.Period("2000", freq="D")])
<PeriodArray>
['2000-01-01', '2000-01-01']
Length: 2, dtype: period[D]
You can use the string alias for `dtype`
>>> pd.array(['a', 'b', 'a'], dtype='category')
['a', 'b', 'a']
Categories (2, object): ['a', 'b']
Or specify the actual dtype
>>> pd.array(['a', 'b', 'a'],
... dtype=pd.CategoricalDtype(['a', 'b', 'c'], ordered=True))
['a', 'b', 'a']
Categories (3, object): ['a' < 'b' < 'c']
If pandas does not infer a dedicated extension type a
:class:`arrays.PandasArray` is returned.
>>> pd.array([1 + 1j, 3 + 2j])
<PandasArray>
[(1+1j), (3+2j)]
Length: 2, dtype: complex128
As mentioned in the "Notes" section, new extension types may be added
in the future (by pandas or 3rd party libraries), causing the return
value to no longer be a :class:`arrays.PandasArray`. Specify the `dtype`
as a NumPy dtype if you need to ensure there's no future change in
behavior.
>>> pd.array([1, 2], dtype=np.dtype("int32"))
<PandasArray>
[1, 2]
Length: 2, dtype: int32
`data` must be 1-dimensional. A ValueError is raised when the input
has the wrong dimensionality.
>>> pd.array(1)
Traceback (most recent call last):
...
ValueError: Cannot pass scalar '1' to 'pandas.array'.
"""
from pandas.core.arrays import (
BooleanArray,
DatetimeArray,
FloatingArray,
IntegerArray,
IntervalArray,
PandasArray,
StringArray,
TimedeltaArray,
period_array,
)
if lib.is_scalar(data):
msg = f"Cannot pass scalar '{data}' to 'pandas.array'."
raise ValueError(msg)
if dtype is None and isinstance(
data, (ABCSeries, ABCIndexClass, ABCExtensionArray)
):
dtype = data.dtype
data = extract_array(data, extract_numpy=True)
# this returns None for not-found dtypes.
if isinstance(dtype, str):
dtype = registry.find(dtype) or dtype
if is_extension_array_dtype(dtype):
cls = cast(ExtensionDtype, dtype).construct_array_type()
return cls._from_sequence(data, dtype=dtype, copy=copy)
if dtype is None:
inferred_dtype = lib.infer_dtype(data, skipna=True)
if inferred_dtype == "period":
try:
return period_array(data, copy=copy)
except IncompatibleFrequency:
# We may have a mixture of frequencies.
# We choose to return an ndarray, rather than raising.
pass
elif inferred_dtype == "interval":
try:
return IntervalArray(data, copy=copy)
except ValueError:
# We may have a mixture of `closed` here.
# We choose to return an ndarray, rather than raising.
pass
elif inferred_dtype.startswith("datetime"):
# datetime, datetime64
try:
return DatetimeArray._from_sequence(data, copy=copy)
except ValueError:
# Mixture of timezones, fall back to PandasArray
pass
elif inferred_dtype.startswith("timedelta"):
# timedelta, timedelta64
return TimedeltaArray._from_sequence(data, copy=copy)
elif inferred_dtype == "string":
return StringArray._from_sequence(data, copy=copy)
elif inferred_dtype == "integer":
return IntegerArray._from_sequence(data, copy=copy)
elif inferred_dtype in ("floating", "mixed-integer-float"):
return FloatingArray._from_sequence(data, copy=copy)
elif inferred_dtype == "boolean":
return BooleanArray._from_sequence(data, copy=copy)
# Pandas overrides NumPy for
# 1. datetime64[ns]
# 2. timedelta64[ns]
# so that a DatetimeArray is returned.
if is_datetime64_ns_dtype(dtype):
return DatetimeArray._from_sequence(data, dtype=dtype, copy=copy)
elif is_timedelta64_ns_dtype(dtype):
return TimedeltaArray._from_sequence(data, dtype=dtype, copy=copy)
result = PandasArray._from_sequence(data, dtype=dtype, copy=copy)
return result
def extract_array(obj: AnyArrayLike, extract_numpy: bool = False) -> ArrayLike:
"""
Extract the ndarray or ExtensionArray from a Series or Index.
For all other types, `obj` is just returned as is.
Parameters
----------
obj : object
For Series / Index, the underlying ExtensionArray is unboxed.
For Numpy-backed ExtensionArrays, the ndarray is extracted.
extract_numpy : bool, default False
Whether to extract the ndarray from a PandasArray
Returns
-------
arr : object
Examples
--------
>>> extract_array(pd.Series(['a', 'b', 'c'], dtype='category'))
['a', 'b', 'c']
Categories (3, object): ['a', 'b', 'c']
Other objects like lists, arrays, and DataFrames are just passed through.
>>> extract_array([1, 2, 3])
[1, 2, 3]
For an ndarray-backed Series / Index a PandasArray is returned.
>>> extract_array(pd.Series([1, 2, 3]))
<PandasArray>
[1, 2, 3]
Length: 3, dtype: int64
To extract all the way down to the ndarray, pass ``extract_numpy=True``.
>>> extract_array(pd.Series([1, 2, 3]), extract_numpy=True)
array([1, 2, 3])
"""
if isinstance(obj, (ABCIndexClass, ABCSeries)):
obj = obj.array
if extract_numpy and isinstance(obj, ABCPandasArray):
obj = obj.to_numpy()
# error: Incompatible return value type (got "Index", expected "ExtensionArray")
# error: Incompatible return value type (got "Series", expected "ExtensionArray")
return obj # type: ignore[return-value]
def sanitize_array(
data,
index: Optional[Index],
dtype: Optional[DtypeObj] = None,
copy: bool = False,
raise_cast_failure: bool = False,
) -> ArrayLike:
"""
Sanitize input data to an ndarray or ExtensionArray, copy if specified,
coerce to the dtype if specified.
"""
if isinstance(data, ma.MaskedArray):
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = maybe_upcast(data, copy=True)
data.soften_mask() # set hardmask False if it was True
data[mask] = fill_value
else:
data = data.copy()
# extract ndarray or ExtensionArray, ensure we have no PandasArray
data = extract_array(data, extract_numpy=True)
# GH#846
if isinstance(data, np.ndarray):
if dtype is not None and is_float_dtype(data.dtype) and is_integer_dtype(dtype):
# possibility of nan -> garbage
try:
subarr = _try_cast(data, dtype, copy, True)
except ValueError:
if copy:
subarr = data.copy()
else:
subarr = np.array(data, copy=False)
else:
            # we will try to copy by definition here
subarr = _try_cast(data, dtype, copy, raise_cast_failure)
elif isinstance(data, ABCExtensionArray):
# it is already ensured above this is not a PandasArray
subarr = data
if dtype is not None:
subarr = subarr.astype(dtype, copy=copy)
elif copy:
subarr = subarr.copy()
return subarr
elif isinstance(data, (list, tuple, abc.Set, abc.ValuesView)) and len(data) > 0:
if isinstance(data, set):
# Raise only for unordered sets, e.g., not for dict_keys
raise TypeError("Set type is unordered")
data = list(data)
if dtype is not None:
subarr = _try_cast(data, dtype, copy, raise_cast_failure)
else:
subarr = maybe_convert_platform(data)
subarr = maybe_cast_to_datetime(subarr, dtype)
elif isinstance(data, range):
# GH#16804
arr = np.arange(data.start, data.stop, data.step, dtype="int64")
subarr = _try_cast(arr, dtype, copy, raise_cast_failure)
elif lib.is_scalar(data) and index is not None and dtype is not None:
data = maybe_cast_to_datetime(data, dtype)
if not lib.is_scalar(data):
data = data[0]
subarr = construct_1d_arraylike_from_scalar(data, len(index), dtype)
else:
subarr = _try_cast(data, dtype, copy, raise_cast_failure)
# scalar like, GH
if getattr(subarr, "ndim", 0) == 0:
if isinstance(data, list): # pragma: no cover
subarr = np.array(data, dtype=object)
elif index is not None:
value = data
# figure out the dtype from the value (upcast if necessary)
if dtype is None:
dtype, value = infer_dtype_from_scalar(value, pandas_dtype=True)
else:
# need to possibly convert the value here
value = maybe_cast_to_datetime(value, dtype)
subarr = construct_1d_arraylike_from_scalar(value, len(index), dtype)
else:
return subarr.item()
# the result that we want
elif subarr.ndim == 1:
if index is not None:
# a 1-element ndarray
if len(subarr) != len(index) and len(subarr) == 1:
subarr = construct_1d_arraylike_from_scalar(
subarr[0], len(index), subarr.dtype
)
elif subarr.ndim > 1:
if isinstance(data, np.ndarray):
raise ValueError("Data must be 1-dimensional")
else:
subarr = com.asarray_tuplesafe(data, dtype=dtype)
if not (is_extension_array_dtype(subarr.dtype) or is_extension_array_dtype(dtype)):
        # This is to prevent a mixed-type Series from being cast entirely to
# NumPy string type, e.g. NaN --> '-1#IND'.
if issubclass(subarr.dtype.type, str):
# GH#16605
# If not empty convert the data to dtype
# GH#19853: If data is a scalar, subarr has already the result
if not lib.is_scalar(data):
if not np.all(isna(data)):
data = np.array(data, dtype=dtype, copy=False)
subarr = np.array(data, dtype=object, copy=copy)
is_object_or_str_dtype = is_object_dtype(dtype) or is_string_dtype(dtype)
if is_object_dtype(subarr.dtype) and not is_object_or_str_dtype:
inferred = lib.infer_dtype(subarr, skipna=False)
if inferred in {"interval", "period"}:
subarr = array(subarr)
return subarr
def _try_cast(arr, dtype: Optional[DtypeObj], copy: bool, raise_cast_failure: bool):
"""
Convert input to numpy ndarray and optionally cast to a given dtype.
Parameters
----------
arr : ndarray, scalar, list, tuple, iterator (catchall)
Excludes: ExtensionArray, Series, Index.
dtype : np.dtype, ExtensionDtype or None
copy : bool
If False, don't copy the data if not needed.
raise_cast_failure : bool
If True, and if a dtype is specified, raise errors during casting.
Otherwise an object array is returned.
"""
# perf shortcut as this is the most common case
if isinstance(arr, np.ndarray):
if maybe_castable(arr) and not copy and dtype is None:
return arr
if isinstance(dtype, ExtensionDtype) and (dtype.kind != "M" or is_sparse(dtype)):
# create an extension array from its dtype
# DatetimeTZ case needs to go through maybe_cast_to_datetime but
# SparseDtype does not
array_type = dtype.construct_array_type()._from_sequence
subarr = array_type(arr, dtype=dtype, copy=copy)
return subarr
try:
# GH#15832: Check if we are requesting a numeric dtype and
# that we can convert the data to the requested dtype.
if is_integer_dtype(dtype):
# this will raise if we have e.g. floats
maybe_cast_to_integer_array(arr, dtype)
subarr = arr
else:
subarr = maybe_cast_to_datetime(arr, dtype)
# Take care in creating object arrays (but iterators are not
# supported):
if is_object_dtype(dtype) and (
is_list_like(subarr)
and not (is_iterator(subarr) or isinstance(subarr, np.ndarray))
):
subarr = construct_1d_object_array_from_listlike(subarr)
elif not is_extension_array_dtype(subarr):
subarr = construct_1d_ndarray_preserving_na(subarr, dtype, copy=copy)
except OutOfBoundsDatetime:
# in case of out of bound datetime64 -> always raise
raise
except (ValueError, TypeError):
if dtype is not None and raise_cast_failure:
raise
else:
subarr = np.array(arr, dtype=object, copy=copy)
return subarr
# MASKED: is_empty_data function (lines 596-613)
def create_series_with_explicit_dtype(
data: Any = None,
index: Optional[Union[ArrayLike, Index]] = None,
dtype: Optional[Dtype] = None,
name: Optional[str] = None,
copy: bool = False,
fastpath: bool = False,
dtype_if_empty: Dtype = object,
) -> Series:
"""
Helper to pass an explicit dtype when instantiating an empty Series.
This silences a DeprecationWarning described in GitHub-17261.
Parameters
----------
data : Mirrored from Series.__init__
index : Mirrored from Series.__init__
dtype : Mirrored from Series.__init__
name : Mirrored from Series.__init__
copy : Mirrored from Series.__init__
fastpath : Mirrored from Series.__init__
dtype_if_empty : str, numpy.dtype, or ExtensionDtype
This dtype will be passed explicitly if an empty Series will
be instantiated.
Returns
-------
Series
"""
from pandas.core.series import Series
if is_empty_data(data) and dtype is None:
dtype = dtype_if_empty
return Series(
data=data, index=index, dtype=dtype, name=name, copy=copy, fastpath=fastpath
)
|
def is_empty_data(data: Any) -> bool:
"""
Utility to check if a Series is instantiated with empty data,
which does not contain dtype information.
Parameters
----------
data : array-like, Iterable, dict, or scalar value
Contains data stored in Series.
Returns
-------
bool
"""
is_none = data is None
is_list_like_without_dtype = is_list_like(data) and not hasattr(data, "dtype")
is_simple_empty = is_list_like_without_dtype and not data
return is_none or is_simple_empty
| 596 | 613 |
"""
Constructor functions intended to be shared by pd.array, Series.__init__,
and Index.__new__.
These should not depend on core.internals.
"""
from __future__ import annotations
from collections import abc
from typing import TYPE_CHECKING, Any, Optional, Sequence, Union, cast
import numpy as np
import numpy.ma as ma
from pandas._libs import lib
from pandas._libs.tslibs import IncompatibleFrequency, OutOfBoundsDatetime
from pandas._typing import AnyArrayLike, ArrayLike, Dtype, DtypeObj
from pandas.core.dtypes.base import ExtensionDtype, registry
from pandas.core.dtypes.cast import (
construct_1d_arraylike_from_scalar,
construct_1d_ndarray_preserving_na,
construct_1d_object_array_from_listlike,
infer_dtype_from_scalar,
maybe_cast_to_datetime,
maybe_cast_to_integer_array,
maybe_castable,
maybe_convert_platform,
maybe_upcast,
)
from pandas.core.dtypes.common import (
is_datetime64_ns_dtype,
is_extension_array_dtype,
is_float_dtype,
is_integer_dtype,
is_iterator,
is_list_like,
is_object_dtype,
is_sparse,
is_string_dtype,
is_timedelta64_ns_dtype,
)
from pandas.core.dtypes.generic import (
ABCExtensionArray,
ABCIndexClass,
ABCPandasArray,
ABCSeries,
)
from pandas.core.dtypes.missing import isna
import pandas.core.common as com
if TYPE_CHECKING:
from pandas import ExtensionArray, Index, Series
def array(
data: Union[Sequence[object], AnyArrayLike],
dtype: Optional[Dtype] = None,
copy: bool = True,
) -> ExtensionArray:
"""
Create an array.
.. versionadded:: 0.24.0
Parameters
----------
data : Sequence of objects
The scalars inside `data` should be instances of the
scalar type for `dtype`. It's expected that `data`
represents a 1-dimensional array of data.
When `data` is an Index or Series, the underlying array
will be extracted from `data`.
dtype : str, np.dtype, or ExtensionDtype, optional
The dtype to use for the array. This may be a NumPy
dtype or an extension type registered with pandas using
:meth:`pandas.api.extensions.register_extension_dtype`.
If not specified, there are two possibilities:
1. When `data` is a :class:`Series`, :class:`Index`, or
:class:`ExtensionArray`, the `dtype` will be taken
from the data.
2. Otherwise, pandas will attempt to infer the `dtype`
from the data.
Note that when `data` is a NumPy array, ``data.dtype`` is
*not* used for inferring the array type. This is because
NumPy cannot represent all the types of data that can be
held in extension arrays.
Currently, pandas will infer an extension dtype for sequences of
============================== =====================================
Scalar Type Array Type
============================== =====================================
:class:`pandas.Interval` :class:`pandas.arrays.IntervalArray`
:class:`pandas.Period` :class:`pandas.arrays.PeriodArray`
:class:`datetime.datetime` :class:`pandas.arrays.DatetimeArray`
:class:`datetime.timedelta` :class:`pandas.arrays.TimedeltaArray`
:class:`int` :class:`pandas.arrays.IntegerArray`
:class:`float` :class:`pandas.arrays.FloatingArray`
:class:`str` :class:`pandas.arrays.StringArray`
:class:`bool` :class:`pandas.arrays.BooleanArray`
============================== =====================================
For all other cases, NumPy's usual inference rules will be used.
.. versionchanged:: 1.0.0
Pandas infers nullable-integer dtype for integer data,
string dtype for string data, and nullable-boolean dtype
for boolean data.
.. versionchanged:: 1.2.0
Pandas now also infers nullable-floating dtype for float-like
input data
copy : bool, default True
Whether to copy the data, even if not necessary. Depending
on the type of `data`, creating the new array may require
copying data, even if ``copy=False``.
Returns
-------
ExtensionArray
The newly created array.
Raises
------
ValueError
When `data` is not 1-dimensional.
See Also
--------
numpy.array : Construct a NumPy array.
Series : Construct a pandas Series.
Index : Construct a pandas Index.
arrays.PandasArray : ExtensionArray wrapping a NumPy array.
Series.array : Extract the array stored within a Series.
Notes
-----
Omitting the `dtype` argument means pandas will attempt to infer the
best array type from the values in the data. As new array types are
added by pandas and 3rd party libraries, the "best" array type may
change. We recommend specifying `dtype` to ensure that
1. the correct array type for the data is returned
2. the returned array type doesn't change as new extension types
are added by pandas and third-party libraries
Additionally, if the underlying memory representation of the returned
array matters, we recommend specifying the `dtype` as a concrete object
rather than a string alias or allowing it to be inferred. For example,
a future version of pandas or a 3rd-party library may include a
dedicated ExtensionArray for string data. In this event, the following
would no longer return a :class:`arrays.PandasArray` backed by a NumPy
array.
>>> pd.array(['a', 'b'], dtype=str)
<PandasArray>
['a', 'b']
Length: 2, dtype: str32
This would instead return the new ExtensionArray dedicated for string
data. If you really need the new array to be backed by a NumPy array,
specify that in the dtype.
>>> pd.array(['a', 'b'], dtype=np.dtype("<U1"))
<PandasArray>
['a', 'b']
Length: 2, dtype: str32
Finally, Pandas has arrays that mostly overlap with NumPy
* :class:`arrays.DatetimeArray`
* :class:`arrays.TimedeltaArray`
When data with a ``datetime64[ns]`` or ``timedelta64[ns]`` dtype is
passed, pandas will always return a ``DatetimeArray`` or ``TimedeltaArray``
rather than a ``PandasArray``. This is for symmetry with the case of
timezone-aware data, which NumPy does not natively support.
>>> pd.array(['2015', '2016'], dtype='datetime64[ns]')
<DatetimeArray>
['2015-01-01 00:00:00', '2016-01-01 00:00:00']
Length: 2, dtype: datetime64[ns]
>>> pd.array(["1H", "2H"], dtype='timedelta64[ns]')
<TimedeltaArray>
['0 days 01:00:00', '0 days 02:00:00']
Length: 2, dtype: timedelta64[ns]
Examples
--------
If a dtype is not specified, pandas will infer the best dtype from the values.
See the description of `dtype` for the types pandas infers for.
>>> pd.array([1, 2])
<IntegerArray>
[1, 2]
Length: 2, dtype: Int64
>>> pd.array([1, 2, np.nan])
<IntegerArray>
[1, 2, <NA>]
Length: 3, dtype: Int64
>>> pd.array([1.1, 2.2])
<FloatingArray>
[1.1, 2.2]
Length: 2, dtype: Float64
>>> pd.array(["a", None, "c"])
<StringArray>
['a', <NA>, 'c']
Length: 3, dtype: string
>>> pd.array([pd.Period('2000', freq="D"), pd.Period("2000", freq="D")])
<PeriodArray>
['2000-01-01', '2000-01-01']
Length: 2, dtype: period[D]
You can use the string alias for `dtype`
>>> pd.array(['a', 'b', 'a'], dtype='category')
['a', 'b', 'a']
Categories (2, object): ['a', 'b']
Or specify the actual dtype
>>> pd.array(['a', 'b', 'a'],
... dtype=pd.CategoricalDtype(['a', 'b', 'c'], ordered=True))
['a', 'b', 'a']
Categories (3, object): ['a' < 'b' < 'c']
If pandas does not infer a dedicated extension type a
:class:`arrays.PandasArray` is returned.
>>> pd.array([1 + 1j, 3 + 2j])
<PandasArray>
[(1+1j), (3+2j)]
Length: 2, dtype: complex128
As mentioned in the "Notes" section, new extension types may be added
in the future (by pandas or 3rd party libraries), causing the return
value to no longer be a :class:`arrays.PandasArray`. Specify the `dtype`
as a NumPy dtype if you need to ensure there's no future change in
behavior.
>>> pd.array([1, 2], dtype=np.dtype("int32"))
<PandasArray>
[1, 2]
Length: 2, dtype: int32
`data` must be 1-dimensional. A ValueError is raised when the input
has the wrong dimensionality.
>>> pd.array(1)
Traceback (most recent call last):
...
ValueError: Cannot pass scalar '1' to 'pandas.array'.
"""
from pandas.core.arrays import (
BooleanArray,
DatetimeArray,
FloatingArray,
IntegerArray,
IntervalArray,
PandasArray,
StringArray,
TimedeltaArray,
period_array,
)
if lib.is_scalar(data):
msg = f"Cannot pass scalar '{data}' to 'pandas.array'."
raise ValueError(msg)
if dtype is None and isinstance(
data, (ABCSeries, ABCIndexClass, ABCExtensionArray)
):
dtype = data.dtype
data = extract_array(data, extract_numpy=True)
# this returns None for not-found dtypes.
if isinstance(dtype, str):
dtype = registry.find(dtype) or dtype
if is_extension_array_dtype(dtype):
cls = cast(ExtensionDtype, dtype).construct_array_type()
return cls._from_sequence(data, dtype=dtype, copy=copy)
if dtype is None:
inferred_dtype = lib.infer_dtype(data, skipna=True)
if inferred_dtype == "period":
try:
return period_array(data, copy=copy)
except IncompatibleFrequency:
# We may have a mixture of frequencies.
# We choose to return an ndarray, rather than raising.
pass
elif inferred_dtype == "interval":
try:
return IntervalArray(data, copy=copy)
except ValueError:
# We may have a mixture of `closed` here.
# We choose to return an ndarray, rather than raising.
pass
elif inferred_dtype.startswith("datetime"):
# datetime, datetime64
try:
return DatetimeArray._from_sequence(data, copy=copy)
except ValueError:
# Mixture of timezones, fall back to PandasArray
pass
elif inferred_dtype.startswith("timedelta"):
# timedelta, timedelta64
return TimedeltaArray._from_sequence(data, copy=copy)
elif inferred_dtype == "string":
return StringArray._from_sequence(data, copy=copy)
elif inferred_dtype == "integer":
return IntegerArray._from_sequence(data, copy=copy)
elif inferred_dtype in ("floating", "mixed-integer-float"):
return FloatingArray._from_sequence(data, copy=copy)
elif inferred_dtype == "boolean":
return BooleanArray._from_sequence(data, copy=copy)
# Pandas overrides NumPy for
# 1. datetime64[ns]
# 2. timedelta64[ns]
# so that a DatetimeArray is returned.
if is_datetime64_ns_dtype(dtype):
return DatetimeArray._from_sequence(data, dtype=dtype, copy=copy)
elif is_timedelta64_ns_dtype(dtype):
return TimedeltaArray._from_sequence(data, dtype=dtype, copy=copy)
result = PandasArray._from_sequence(data, dtype=dtype, copy=copy)
return result
def extract_array(obj: AnyArrayLike, extract_numpy: bool = False) -> ArrayLike:
"""
Extract the ndarray or ExtensionArray from a Series or Index.
For all other types, `obj` is just returned as is.
Parameters
----------
obj : object
For Series / Index, the underlying ExtensionArray is unboxed.
For Numpy-backed ExtensionArrays, the ndarray is extracted.
extract_numpy : bool, default False
Whether to extract the ndarray from a PandasArray
Returns
-------
arr : object
Examples
--------
>>> extract_array(pd.Series(['a', 'b', 'c'], dtype='category'))
['a', 'b', 'c']
Categories (3, object): ['a', 'b', 'c']
Other objects like lists, arrays, and DataFrames are just passed through.
>>> extract_array([1, 2, 3])
[1, 2, 3]
For an ndarray-backed Series / Index a PandasArray is returned.
>>> extract_array(pd.Series([1, 2, 3]))
<PandasArray>
[1, 2, 3]
Length: 3, dtype: int64
To extract all the way down to the ndarray, pass ``extract_numpy=True``.
>>> extract_array(pd.Series([1, 2, 3]), extract_numpy=True)
array([1, 2, 3])
"""
if isinstance(obj, (ABCIndexClass, ABCSeries)):
obj = obj.array
if extract_numpy and isinstance(obj, ABCPandasArray):
obj = obj.to_numpy()
# error: Incompatible return value type (got "Index", expected "ExtensionArray")
# error: Incompatible return value type (got "Series", expected "ExtensionArray")
return obj # type: ignore[return-value]
def sanitize_array(
data,
index: Optional[Index],
dtype: Optional[DtypeObj] = None,
copy: bool = False,
raise_cast_failure: bool = False,
) -> ArrayLike:
"""
Sanitize input data to an ndarray or ExtensionArray, copy if specified,
coerce to the dtype if specified.
"""
if isinstance(data, ma.MaskedArray):
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = maybe_upcast(data, copy=True)
data.soften_mask() # set hardmask False if it was True
data[mask] = fill_value
else:
data = data.copy()
# extract ndarray or ExtensionArray, ensure we have no PandasArray
data = extract_array(data, extract_numpy=True)
# GH#846
if isinstance(data, np.ndarray):
if dtype is not None and is_float_dtype(data.dtype) and is_integer_dtype(dtype):
# possibility of nan -> garbage
try:
subarr = _try_cast(data, dtype, copy, True)
except ValueError:
if copy:
subarr = data.copy()
else:
subarr = np.array(data, copy=False)
else:
            # we will try to copy by-definition here
subarr = _try_cast(data, dtype, copy, raise_cast_failure)
elif isinstance(data, ABCExtensionArray):
# it is already ensured above this is not a PandasArray
subarr = data
if dtype is not None:
subarr = subarr.astype(dtype, copy=copy)
elif copy:
subarr = subarr.copy()
return subarr
elif isinstance(data, (list, tuple, abc.Set, abc.ValuesView)) and len(data) > 0:
if isinstance(data, set):
# Raise only for unordered sets, e.g., not for dict_keys
raise TypeError("Set type is unordered")
data = list(data)
if dtype is not None:
subarr = _try_cast(data, dtype, copy, raise_cast_failure)
else:
subarr = maybe_convert_platform(data)
subarr = maybe_cast_to_datetime(subarr, dtype)
elif isinstance(data, range):
# GH#16804
arr = np.arange(data.start, data.stop, data.step, dtype="int64")
subarr = _try_cast(arr, dtype, copy, raise_cast_failure)
elif lib.is_scalar(data) and index is not None and dtype is not None:
data = maybe_cast_to_datetime(data, dtype)
if not lib.is_scalar(data):
data = data[0]
subarr = construct_1d_arraylike_from_scalar(data, len(index), dtype)
else:
subarr = _try_cast(data, dtype, copy, raise_cast_failure)
# scalar like, GH
if getattr(subarr, "ndim", 0) == 0:
if isinstance(data, list): # pragma: no cover
subarr = np.array(data, dtype=object)
elif index is not None:
value = data
# figure out the dtype from the value (upcast if necessary)
if dtype is None:
dtype, value = infer_dtype_from_scalar(value, pandas_dtype=True)
else:
# need to possibly convert the value here
value = maybe_cast_to_datetime(value, dtype)
subarr = construct_1d_arraylike_from_scalar(value, len(index), dtype)
else:
return subarr.item()
# the result that we want
elif subarr.ndim == 1:
if index is not None:
# a 1-element ndarray
if len(subarr) != len(index) and len(subarr) == 1:
subarr = construct_1d_arraylike_from_scalar(
subarr[0], len(index), subarr.dtype
)
elif subarr.ndim > 1:
if isinstance(data, np.ndarray):
raise ValueError("Data must be 1-dimensional")
else:
subarr = com.asarray_tuplesafe(data, dtype=dtype)
if not (is_extension_array_dtype(subarr.dtype) or is_extension_array_dtype(dtype)):
# This is to prevent mixed-type Series getting all casted to
# NumPy string type, e.g. NaN --> '-1#IND'.
if issubclass(subarr.dtype.type, str):
# GH#16605
# If not empty convert the data to dtype
# GH#19853: If data is a scalar, subarr has already the result
if not lib.is_scalar(data):
if not np.all(isna(data)):
data = np.array(data, dtype=dtype, copy=False)
subarr = np.array(data, dtype=object, copy=copy)
is_object_or_str_dtype = is_object_dtype(dtype) or is_string_dtype(dtype)
if is_object_dtype(subarr.dtype) and not is_object_or_str_dtype:
inferred = lib.infer_dtype(subarr, skipna=False)
if inferred in {"interval", "period"}:
subarr = array(subarr)
return subarr
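# A short behaviour sketch of the masked-array branch above (shown via the
# public Series constructor, which routes its input through sanitize_array,
# so no internal imports are assumed): masked entries are upcast and replaced
# by the fill value, here NaN.
import numpy.ma as ma
import pandas as pd
_demo = pd.Series(ma.masked_array([1, 2, 3], mask=[False, True, False]))
_demo.dtype            # float64: upcast so the masked entry can become NaN
_demo.isna().tolist()  # [False, True, False]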
def _try_cast(arr, dtype: Optional[DtypeObj], copy: bool, raise_cast_failure: bool):
"""
Convert input to numpy ndarray and optionally cast to a given dtype.
Parameters
----------
arr : ndarray, scalar, list, tuple, iterator (catchall)
Excludes: ExtensionArray, Series, Index.
dtype : np.dtype, ExtensionDtype or None
copy : bool
If False, don't copy the data if not needed.
raise_cast_failure : bool
If True, and if a dtype is specified, raise errors during casting.
Otherwise an object array is returned.
"""
# perf shortcut as this is the most common case
if isinstance(arr, np.ndarray):
if maybe_castable(arr) and not copy and dtype is None:
return arr
if isinstance(dtype, ExtensionDtype) and (dtype.kind != "M" or is_sparse(dtype)):
# create an extension array from its dtype
# DatetimeTZ case needs to go through maybe_cast_to_datetime but
# SparseDtype does not
array_type = dtype.construct_array_type()._from_sequence
subarr = array_type(arr, dtype=dtype, copy=copy)
return subarr
try:
# GH#15832: Check if we are requesting a numeric dtype and
# that we can convert the data to the requested dtype.
if is_integer_dtype(dtype):
# this will raise if we have e.g. floats
maybe_cast_to_integer_array(arr, dtype)
subarr = arr
else:
subarr = maybe_cast_to_datetime(arr, dtype)
# Take care in creating object arrays (but iterators are not
# supported):
if is_object_dtype(dtype) and (
is_list_like(subarr)
and not (is_iterator(subarr) or isinstance(subarr, np.ndarray))
):
subarr = construct_1d_object_array_from_listlike(subarr)
elif not is_extension_array_dtype(subarr):
subarr = construct_1d_ndarray_preserving_na(subarr, dtype, copy=copy)
except OutOfBoundsDatetime:
# in case of out of bound datetime64 -> always raise
raise
except (ValueError, TypeError):
if dtype is not None and raise_cast_failure:
raise
else:
subarr = np.array(arr, dtype=object, copy=copy)
return subarr
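# A toy re-implementation (a sketch, not the function above) of _try_cast's
# fallback behaviour: try the requested dtype, and drop to an object ndarray
# when the cast fails and raise_cast_failure is False.
def _try_cast_sketch(values, dtype, raise_cast_failure=False):
    try:
        return np.asarray(values, dtype=dtype)
    except (ValueError, TypeError):
        if dtype is not None and raise_cast_failure:
            raise
        return np.asarray(values, dtype=object)
# _try_cast_sketch(["1", "x"], "int64") -> array(['1', 'x'], dtype=object)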
def is_empty_data(data: Any) -> bool:
"""
Utility to check if a Series is instantiated with empty data,
which does not contain dtype information.
Parameters
----------
data : array-like, Iterable, dict, or scalar value
Contains data stored in Series.
Returns
-------
bool
"""
is_none = data is None
is_list_like_without_dtype = is_list_like(data) and not hasattr(data, "dtype")
is_simple_empty = is_list_like_without_dtype and not data
return is_none or is_simple_empty
def create_series_with_explicit_dtype(
data: Any = None,
index: Optional[Union[ArrayLike, Index]] = None,
dtype: Optional[Dtype] = None,
name: Optional[str] = None,
copy: bool = False,
fastpath: bool = False,
dtype_if_empty: Dtype = object,
) -> Series:
"""
Helper to pass an explicit dtype when instantiating an empty Series.
This silences a DeprecationWarning described in GitHub-17261.
Parameters
----------
data : Mirrored from Series.__init__
index : Mirrored from Series.__init__
dtype : Mirrored from Series.__init__
name : Mirrored from Series.__init__
copy : Mirrored from Series.__init__
fastpath : Mirrored from Series.__init__
dtype_if_empty : str, numpy.dtype, or ExtensionDtype
This dtype will be passed explicitly if an empty Series will
be instantiated.
Returns
-------
Series
"""
from pandas.core.series import Series
if is_empty_data(data) and dtype is None:
dtype = dtype_if_empty
return Series(
data=data, index=index, dtype=dtype, name=name, copy=copy, fastpath=fastpath
)
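# A hedged usage sketch of the two helpers above (they are pandas-internal,
# so their import path is version-dependent; the checks below only mirror
# is_empty_data's logic). Data that is None or an empty, dtype-less container
# counts as "empty", so create_series_with_explicit_dtype passes
# dtype_if_empty explicitly instead of relying on the empty-Series default.
for _candidate in (None, [], {}, np.array([], dtype="int64")):
    _has_dtype = hasattr(_candidate, "dtype")
    _empty = _candidate is None or (not _has_dtype and not _candidate)
    print(type(_candidate).__name__, "treated as empty:", _empty)
# An empty Series built with dtype_if_empty=object then reports dtype 'object'.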
|
load_raw_mask
|
Load one of the two kinds of VOC segmentation masks.
image_id: id of mask
class_or_object: 'class_mask' or 'object_mask' for SegmentationClass or SegmentationObject
Returns:
image: numpy array of the mask image.
|
import os
import sys
import json
import datetime
import numpy as np
import skimage.draw
import skimage.io
import skimage.color
from bs4 import BeautifulSoup as bs
import cv2
import imgaug
from utils import *
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
# Inference result directory
RESULTS_DIR = os.path.abspath("./inference/")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from configs import Config
# from mrcnn import model as modellib, utils
# from mrcnn import visualize
import matplotlib
# Agg backend runs without a display
matplotlib.use('Agg')
import matplotlib.pyplot as plt
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
DEFAULT_DATASET_YEAR = '2012'
COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# VOC DATASET MASK MAP FUNCTION
# The following code maps each mask color (SegmentationClass) to its ground-truth index.
# - reference: https://d2l.ai/chapter_computer-vision/semantic-segmentation-and-dataset.html
VOC_COLORMAP = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0],
[0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128],
[64, 0, 0], [192, 0, 0], [64, 128, 0], [192, 128, 0],
[64, 0, 128], [192, 0, 128], [64, 128, 128], [192, 128, 128],
[0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0],
[0, 64, 128]]
VOC_CLASSES = ['background', 'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair', 'cow',
'diningtable', 'dog', 'horse', 'motorbike', 'person',
'potted plant', 'sheep', 'sofa', 'train', 'tv/monitor']
def build_colormap2label():
"""Build a RGB color to label mapping for segmentation."""
colormap2label = np.zeros(256 ** 3)
for i, colormap in enumerate(VOC_COLORMAP):
colormap2label[(colormap[0]*256 + colormap[1])*256 + colormap[2]] = i
return colormap2label
def voc_label_indices(colormap, colormap2label):
"""Map a RGB color to a label."""
colormap = colormap.astype('int32')
idx = ((colormap[:, :, 0] * 256 + colormap[:, :, 1]) * 256
+ colormap[:, :, 2])
return colormap2label[idx]
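# A tiny sanity check of the mapping above (a synthetic 1x2 RGB mask, not a
# VOC file): background black maps to label 0 and 'aeroplane' red (128, 0, 0)
# maps to label 1.
_demo_mask = np.array([[[0, 0, 0], [128, 0, 0]]], dtype=np.uint8)  # shape (1, 2, 3)
voc_label_indices(_demo_mask, build_colormap2label())  # -> array([[0., 1.]])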
# VOC DATASET MASK MAP FUNCTION
class VocConfig(Config):
NAME = "voc"
    IMAGES_PER_GPU = 2
    NUM_CLASSES = 1 + 20 # VOC 2012 has 20 classes. "1" is for background.
class InferenceConfig(VocConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
DETECTION_MIN_CONFIDENCE = 0
class VocDataset(Dataset):
def load_voc(self, dataset_dir, trainval, year='2012'):
"""Load a voc_year of the VOC dataset.
dataset_dir: The root directory of the VOC dataset, example: '/mnt/disk1/VOCdevkit'
trainval: 'train' or 'val' for Training or Validation
year: '2007' or '2012' for VOC dataset
"""
voc_year = 'VOC' + year
Segmentation = os.path.join(dataset_dir, voc_year, 'ImageSets', 'Segmentation')
JPEGImages = os.path.join(dataset_dir, voc_year, 'JPEGImages')
Annotations = os.path.join(dataset_dir, voc_year, 'Annotations')
SegmentationClass = os.path.join(dataset_dir, voc_year, 'SegmentationClass')
SegmentationObject = os.path.join(dataset_dir, voc_year, 'SegmentationObject')
        # load classes of VOC; BG is initialized in the parent class.
for idx, class_name in enumerate(VOC_CLASSES[1:]):
self.add_class("voc", idx + 1, class_name)
assert trainval in ['train', 'val']
# read segmentation annotation file
annotation_file = os.path.join(Segmentation, trainval + '.txt')
image_ids = []
with open(annotation_file) as f:
image_id_list = [line.strip() for line in f]
image_ids += image_id_list
for image_id in image_ids:
image_file_name = '{}.jpg'.format(image_id)
mask_file_name = '{}.png'.format(image_id)
xml_file_name = '{}.xml'.format(image_id)
image_path = os.path.join(JPEGImages, image_file_name)
# Parse Annotations XML File
with open(os.path.join(Annotations, xml_file_name)) as f:
soup = bs(f, 'lxml')
objects = soup.find_all('object')
image_contains_class_flag = False
for obj in objects:
class_name = obj.find('name').text
if class_name in VOC_CLASSES:
image_contains_class_flag = True
continue
if image_contains_class_flag:
class_mask_path = os.path.join(SegmentationClass, mask_file_name)
object_mask_path = os.path.join(SegmentationObject, mask_file_name)
self.add_image("voc",
image_id=image_file_name,
path=image_path,
class_mask_path=class_mask_path,
object_mask_path=object_mask_path)
# MASKED: load_raw_mask function (lines 132-146)
def load_class_label(self, image_id):
        '''Map SegmentationClass image colors to ground-truth indices.
image_id: id of mask
Return:
            class_label: [height, width] matrix containing values from 0 to 20
'''
raw_mask = self.load_raw_mask(image_id, 'class_mask')
class_label = voc_label_indices(raw_mask, build_colormap2label())
return class_label
def load_mask(self, image_id):
        '''Map annotation images to instance masks (as required by Mask R-CNN).
image_id: id of mask
Returns:
masks: A bool array of shape [height, width, instance count] with
one mask per instance.
class_ids: a 1D array of class IDs of the instance masks.
'''
class_label = self.load_class_label(image_id)
instance_mask = self.load_raw_mask(image_id, 'object_mask')
max_indice = int(np.max(class_label))
instance_label = []
instance_class = []
for i in range(1, max_indice+1):
if not np.any(class_label==i):
continue
gt_indice = i
object_filter = class_label == i
object_filter = object_filter.astype(np.uint8)
object_filter = np.dstack((object_filter,object_filter,object_filter))
filtered = np.multiply(object_filter, instance_mask)
gray = cv2.cvtColor(filtered, cv2.COLOR_RGB2GRAY)
max_gray = np.max(gray)
for sub_index in range(1, max_gray+1):
if not np.any(gray==sub_index):
continue
instance_filter = gray == sub_index
instance_label += [instance_filter]
instance_class += [gt_indice]
masks = np.asarray(instance_label).transpose((1,2,0))
classes_ids = np.asarray(instance_class)
return masks, classes_ids
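# A minimal, self-contained illustration (synthetic 2x2 arrays, not VOC data)
# of the per-class / per-instance splitting done in load_mask above: keep one
# class in the label map, mask the instance image with it, then split the
# result by its distinct instance values.
_cls = np.array([[0, 1], [1, 2]])   # 0 = background, 1 and 2 = class labels
_inst = np.array([[0, 1], [2, 3]])  # a distinct value per instance
_masks, _ids = [], []
for _c in range(1, int(_cls.max()) + 1):
    _gray = np.where(_cls == _c, _inst, 0)
    for _i in range(1, int(_gray.max()) + 1):
        if np.any(_gray == _i):
            _masks.append(_gray == _i)
            _ids.append(_c)
# np.stack(_masks, axis=-1).shape == (2, 2, 3) and _ids == [1, 1, 2]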
############################################################
# Inference
############################################################
def inference(model, dataset, limit):
"""Run detection on images in the given directory."""
# Create directory
if not os.path.exists(RESULTS_DIR):
os.makedirs(RESULTS_DIR)
time_dir = "{:%Y%m%dT%H%M%S}".format(datetime.datetime.now())
time_dir = os.path.join(RESULTS_DIR, time_dir)
os.makedirs(time_dir)
    # Loop over images
for image_id in dataset.image_ids[:limit]:
# Load image and run detection
image = dataset.load_image(image_id)
# Detect objects
r = model.detect([image], verbose=0)[0]
# Encode image to RLE. Returns a string of multiple lines
source_id = dataset.image_info[image_id]["id"]
# Save image with masks
if len(r['class_ids']) > 0:
print('[*] {}th image has {} instance(s).'.format(image_id, len(r['class_ids'])))
visualize.display_instances(
image, r['rois'], r['masks'], r['class_ids'],
dataset.class_names, r['scores'],
show_bbox=True, show_mask=True,
title="Predictions")
plt.savefig("{}/{}".format(time_dir, dataset.image_info[image_id]["id"]))
plt.close()
else:
plt.imshow(image)
plt.savefig("{}/noinstance_{}".format(time_dir, dataset.image_info[image_id]["id"]))
            print('[*] {}th image has no instance.'.format(image_id))
plt.close()
if __name__ == '__main__':
import argparse
# Parse command line arguments
parser = argparse.ArgumentParser(
description='Train Mask R-CNN on PASCAL VOC.')
parser.add_argument("--command",
metavar="<command>",
default='train',
help="'train' or 'inference' on PASCAL VOC")
parser.add_argument('--dataset',
default="/data/lktime-seg-tp/dataset/PASCALVOC/VOCdevkit/",
help='Directory of the PASCAL VOC dataset')
parser.add_argument('--year',
default='2012',
help='Year of the PASCAL VOC dataset (2007 or 2012) (default=2012)')
parser.add_argument('--model',
default="/path/to/weights.h5",
help="Path to weights .h5 file or 'voc'")
parser.add_argument('--logs',
default='./logs',
metavar="/path/to/logs/",
help='Logs and checkpoints directory (default=logs/)')
parser.add_argument('--limit', required=False,
default=10,
metavar="<image count>",
help='Images to use for evaluation (default=10)')
# TODO
'''
parser.add_argument('--download', required=False,
default=False,
metavar="<True|False>",
help='Automatically download and unzip PASCAL VOC files (default=False)',
type=bool)
'''
args = parser.parse_args()
print("Command: ", args.command)
print("Model: ", args.model)
print("Dataset: ", args.dataset)
print("Year: ", args.year)
print("Logs: ", args.logs)
#print("Auto Download: ", args.download)
# Configurations
if args.command == "train":
config = VocConfig()
else:
config = InferenceConfig()
config.display()
# Create model
# if args.command == "train":
# model = modellib.MaskRCNN(mode="training", config=config,
# model_dir=args.logs)
# else:
# model = modellib.MaskRCNN(mode="inference", config=config,
# model_dir=args.logs)
# Select weights file to load
# if args.model.lower() == "coco":
# model_path = COCO_WEIGHTS_PATH
# elif args.model.lower() == "last":
# # Find last trained weights
# model_path = model.find_last()
# elif args.model.lower() == "imagenet":
# # Start from ImageNet trained weights
# model_path = model.get_imagenet_weights()
# else:
# model_path = args.model
# Load weights
# if args.model.lower() == "coco":
# # Exclude the last layers because they require a matching
# # number of classes
# model.load_weights(model_path, by_name=True, exclude=[
# "mrcnn_class_logits", "mrcnn_bbox_fc",
# "mrcnn_bbox", "mrcnn_mask"])
# else:
# print("Loading weights ", model_path)
# model.load_weights(model_path, by_name=True)
# Train or evaluate
if args.command == "train":
# Training dataset. Use the training set and 35K from the
        # validation set, as in the Mask R-CNN paper.
dataset_train = VocDataset()
dataset_train.load_voc(args.dataset, "train", year=args.year)
dataset_train.prepare()
# Validation dataset
dataset_val = VocDataset()
dataset_val.load_voc(args.dataset, "val", year=args.year)
dataset_val.prepare()
# Image Augmentation
# Right/Left flip 50% of the time
augmentation = imgaug.augmenters.Fliplr(0.5)
# *** This training schedule is an example. Update to your needs ***
# # Training - Stage 1
# print("Training network heads")
# model.train(dataset_train, dataset_val,
# learning_rate=config.LEARNING_RATE,
# epochs=40,
# layers='heads',
# augmentation=augmentation)
# # Training - Stage 2
# # Finetune layers from ResNet stage 4 and up
# print("Fine tune Resnet stage 4 and up")
# model.train(dataset_train, dataset_val,
# learning_rate=config.LEARNING_RATE,
# epochs=120,
# layers='4+',
# augmentation=augmentation)
# # Training - Stage 3
# # Fine tune all layers
# print("Fine tune all layers")
# model.train(dataset_train, dataset_val,
# learning_rate=config.LEARNING_RATE / 10,
# epochs=160,
# layers='all',
# augmentation=augmentation)
# elif args.command == "inference":
# #print("evaluate have not been implemented")
# # Validation dataset
# dataset_val = VocDataset()
# voc = dataset_val.load_voc(args.dataset, "val", year=args.year)
# dataset_val.prepare()
# print("Running voc inference on {} images.".format(args.limit))
# inference(model, dataset_val, int(args.limit))
# else:
# print("'{}' is not recognized. "
# "Use 'train' or 'inference'".format(args.command))
|
def load_raw_mask(self, image_id, class_or_object):
        '''Load one of the two kinds of VOC segmentation masks.
image_id: id of mask
class_or_object: 'class_mask' or 'object_mask' for SegmentationClass or SegmentationObject
Returns:
            image: numpy array of the mask image.
'''
assert class_or_object in ['class_mask', 'object_mask']
image = skimage.io.imread(self.image_info[image_id][class_or_object+'_path'])
if image.ndim != 3:
image = skimage.color.gray2rgb(image)
# If has an alpha channel, remove it for consistency
if image.shape[-1] == 4:
image = image[..., :3]
return image
| 132 | 146 |
import os
import sys
import json
import datetime
import numpy as np
import skimage.draw
import skimage.io
import skimage.color
from bs4 import BeautifulSoup as bs
import cv2
import imgaug
from utils import *
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
# Inference result directory
RESULTS_DIR = os.path.abspath("./inference/")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from configs import Config
# from mrcnn import model as modellib, utils
# from mrcnn import visualize
import matplotlib
# Agg backend runs without a display
matplotlib.use('Agg')
import matplotlib.pyplot as plt
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
DEFAULT_DATASET_YEAR = '2012'
COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# VOC DATASET MASK MAP FUNCTION
# The following code maps each mask color (SegmentationClass) to its ground-truth index.
# - reference: https://d2l.ai/chapter_computer-vision/semantic-segmentation-and-dataset.html
VOC_COLORMAP = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0],
[0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128],
[64, 0, 0], [192, 0, 0], [64, 128, 0], [192, 128, 0],
[64, 0, 128], [192, 0, 128], [64, 128, 128], [192, 128, 128],
[0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0],
[0, 64, 128]]
VOC_CLASSES = ['background', 'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair', 'cow',
'diningtable', 'dog', 'horse', 'motorbike', 'person',
'potted plant', 'sheep', 'sofa', 'train', 'tv/monitor']
def build_colormap2label():
"""Build a RGB color to label mapping for segmentation."""
colormap2label = np.zeros(256 ** 3)
for i, colormap in enumerate(VOC_COLORMAP):
colormap2label[(colormap[0]*256 + colormap[1])*256 + colormap[2]] = i
return colormap2label
def voc_label_indices(colormap, colormap2label):
"""Map a RGB color to a label."""
colormap = colormap.astype('int32')
idx = ((colormap[:, :, 0] * 256 + colormap[:, :, 1]) * 256
+ colormap[:, :, 2])
return colormap2label[idx]
# VOC DATASET MASK MAP FUNCTION
class VocConfig(Config):
NAME = "voc"
    IMAGES_PER_GPU = 2
    NUM_CLASSES = 1 + 20 # VOC 2012 has 20 classes. "1" is for background.
class InferenceConfig(VocConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
DETECTION_MIN_CONFIDENCE = 0
class VocDataset(Dataset):
def load_voc(self, dataset_dir, trainval, year='2012'):
"""Load a voc_year of the VOC dataset.
dataset_dir: The root directory of the VOC dataset, example: '/mnt/disk1/VOCdevkit'
trainval: 'train' or 'val' for Training or Validation
year: '2007' or '2012' for VOC dataset
"""
voc_year = 'VOC' + year
Segmentation = os.path.join(dataset_dir, voc_year, 'ImageSets', 'Segmentation')
JPEGImages = os.path.join(dataset_dir, voc_year, 'JPEGImages')
Annotations = os.path.join(dataset_dir, voc_year, 'Annotations')
SegmentationClass = os.path.join(dataset_dir, voc_year, 'SegmentationClass')
SegmentationObject = os.path.join(dataset_dir, voc_year, 'SegmentationObject')
        # load classes of VOC; BG is initialized in the parent class.
for idx, class_name in enumerate(VOC_CLASSES[1:]):
self.add_class("voc", idx + 1, class_name)
assert trainval in ['train', 'val']
# read segmentation annotation file
annotation_file = os.path.join(Segmentation, trainval + '.txt')
image_ids = []
with open(annotation_file) as f:
image_id_list = [line.strip() for line in f]
image_ids += image_id_list
for image_id in image_ids:
image_file_name = '{}.jpg'.format(image_id)
mask_file_name = '{}.png'.format(image_id)
xml_file_name = '{}.xml'.format(image_id)
image_path = os.path.join(JPEGImages, image_file_name)
# Parse Annotations XML File
with open(os.path.join(Annotations, xml_file_name)) as f:
soup = bs(f, 'lxml')
objects = soup.find_all('object')
image_contains_class_flag = False
for obj in objects:
class_name = obj.find('name').text
if class_name in VOC_CLASSES:
image_contains_class_flag = True
continue
if image_contains_class_flag:
class_mask_path = os.path.join(SegmentationClass, mask_file_name)
object_mask_path = os.path.join(SegmentationObject, mask_file_name)
self.add_image("voc",
image_id=image_file_name,
path=image_path,
class_mask_path=class_mask_path,
object_mask_path=object_mask_path)
def load_raw_mask(self, image_id, class_or_object):
        '''Load one of the two kinds of VOC segmentation masks.
image_id: id of mask
class_or_object: 'class_mask' or 'object_mask' for SegmentationClass or SegmentationObject
Returns:
            image: numpy array of the mask image.
'''
assert class_or_object in ['class_mask', 'object_mask']
image = skimage.io.imread(self.image_info[image_id][class_or_object+'_path'])
if image.ndim != 3:
image = skimage.color.gray2rgb(image)
# If has an alpha channel, remove it for consistency
if image.shape[-1] == 4:
image = image[..., :3]
return image
def load_class_label(self, image_id):
        '''Map SegmentationClass image colors to ground-truth indices.
image_id: id of mask
Return:
            class_label: [height, width] matrix containing values from 0 to 20
'''
raw_mask = self.load_raw_mask(image_id, 'class_mask')
class_label = voc_label_indices(raw_mask, build_colormap2label())
return class_label
def load_mask(self, image_id):
        '''Map annotation images to instance masks (as required by Mask R-CNN).
image_id: id of mask
Returns:
masks: A bool array of shape [height, width, instance count] with
one mask per instance.
class_ids: a 1D array of class IDs of the instance masks.
'''
class_label = self.load_class_label(image_id)
instance_mask = self.load_raw_mask(image_id, 'object_mask')
max_indice = int(np.max(class_label))
instance_label = []
instance_class = []
for i in range(1, max_indice+1):
if not np.any(class_label==i):
continue
gt_indice = i
object_filter = class_label == i
object_filter = object_filter.astype(np.uint8)
object_filter = np.dstack((object_filter,object_filter,object_filter))
filtered = np.multiply(object_filter, instance_mask)
gray = cv2.cvtColor(filtered, cv2.COLOR_RGB2GRAY)
max_gray = np.max(gray)
for sub_index in range(1, max_gray+1):
if not np.any(gray==sub_index):
continue
instance_filter = gray == sub_index
instance_label += [instance_filter]
instance_class += [gt_indice]
masks = np.asarray(instance_label).transpose((1,2,0))
classes_ids = np.asarray(instance_class)
return masks, classes_ids
############################################################
# Inference
############################################################
def inference(model, dataset, limit):
"""Run detection on images in the given directory."""
# Create directory
if not os.path.exists(RESULTS_DIR):
os.makedirs(RESULTS_DIR)
time_dir = "{:%Y%m%dT%H%M%S}".format(datetime.datetime.now())
time_dir = os.path.join(RESULTS_DIR, time_dir)
os.makedirs(time_dir)
    # Loop over images
for image_id in dataset.image_ids[:limit]:
# Load image and run detection
image = dataset.load_image(image_id)
# Detect objects
r = model.detect([image], verbose=0)[0]
# Encode image to RLE. Returns a string of multiple lines
source_id = dataset.image_info[image_id]["id"]
# Save image with masks
if len(r['class_ids']) > 0:
print('[*] {}th image has {} instance(s).'.format(image_id, len(r['class_ids'])))
visualize.display_instances(
image, r['rois'], r['masks'], r['class_ids'],
dataset.class_names, r['scores'],
show_bbox=True, show_mask=True,
title="Predictions")
plt.savefig("{}/{}".format(time_dir, dataset.image_info[image_id]["id"]))
plt.close()
else:
plt.imshow(image)
plt.savefig("{}/noinstance_{}".format(time_dir, dataset.image_info[image_id]["id"]))
            print('[*] {}th image has no instance.'.format(image_id))
plt.close()
if __name__ == '__main__':
import argparse
# Parse command line arguments
parser = argparse.ArgumentParser(
description='Train Mask R-CNN on PASCAL VOC.')
parser.add_argument("--command",
metavar="<command>",
default='train',
help="'train' or 'inference' on PASCAL VOC")
parser.add_argument('--dataset',
default="/data/lktime-seg-tp/dataset/PASCALVOC/VOCdevkit/",
help='Directory of the PASCAL VOC dataset')
parser.add_argument('--year',
default='2012',
help='Year of the PASCAL VOC dataset (2007 or 2012) (default=2012)')
parser.add_argument('--model',
default="/path/to/weights.h5",
help="Path to weights .h5 file or 'voc'")
parser.add_argument('--logs',
default='./logs',
metavar="/path/to/logs/",
help='Logs and checkpoints directory (default=logs/)')
parser.add_argument('--limit', required=False,
default=10,
metavar="<image count>",
help='Images to use for evaluation (default=10)')
# TODO
'''
parser.add_argument('--download', required=False,
default=False,
metavar="<True|False>",
help='Automatically download and unzip PASCAL VOC files (default=False)',
type=bool)
'''
args = parser.parse_args()
print("Command: ", args.command)
print("Model: ", args.model)
print("Dataset: ", args.dataset)
print("Year: ", args.year)
print("Logs: ", args.logs)
#print("Auto Download: ", args.download)
# Configurations
if args.command == "train":
config = VocConfig()
else:
config = InferenceConfig()
config.display()
# Create model
# if args.command == "train":
# model = modellib.MaskRCNN(mode="training", config=config,
# model_dir=args.logs)
# else:
# model = modellib.MaskRCNN(mode="inference", config=config,
# model_dir=args.logs)
# Select weights file to load
# if args.model.lower() == "coco":
# model_path = COCO_WEIGHTS_PATH
# elif args.model.lower() == "last":
# # Find last trained weights
# model_path = model.find_last()
# elif args.model.lower() == "imagenet":
# # Start from ImageNet trained weights
# model_path = model.get_imagenet_weights()
# else:
# model_path = args.model
# Load weights
# if args.model.lower() == "coco":
# # Exclude the last layers because they require a matching
# # number of classes
# model.load_weights(model_path, by_name=True, exclude=[
# "mrcnn_class_logits", "mrcnn_bbox_fc",
# "mrcnn_bbox", "mrcnn_mask"])
# else:
# print("Loading weights ", model_path)
# model.load_weights(model_path, by_name=True)
# Train or evaluate
if args.command == "train":
# Training dataset. Use the training set and 35K from the
        # validation set, as in the Mask R-CNN paper.
dataset_train = VocDataset()
dataset_train.load_voc(args.dataset, "train", year=args.year)
dataset_train.prepare()
# Validation dataset
dataset_val = VocDataset()
dataset_val.load_voc(args.dataset, "val", year=args.year)
dataset_val.prepare()
# Image Augmentation
# Right/Left flip 50% of the time
augmentation = imgaug.augmenters.Fliplr(0.5)
# *** This training schedule is an example. Update to your needs ***
# # Training - Stage 1
# print("Training network heads")
# model.train(dataset_train, dataset_val,
# learning_rate=config.LEARNING_RATE,
# epochs=40,
# layers='heads',
# augmentation=augmentation)
# # Training - Stage 2
# # Finetune layers from ResNet stage 4 and up
# print("Fine tune Resnet stage 4 and up")
# model.train(dataset_train, dataset_val,
# learning_rate=config.LEARNING_RATE,
# epochs=120,
# layers='4+',
# augmentation=augmentation)
# # Training - Stage 3
# # Fine tune all layers
# print("Fine tune all layers")
# model.train(dataset_train, dataset_val,
# learning_rate=config.LEARNING_RATE / 10,
# epochs=160,
# layers='all',
# augmentation=augmentation)
# elif args.command == "inference":
# #print("evaluate have not been implemented")
# # Validation dataset
# dataset_val = VocDataset()
# voc = dataset_val.load_voc(args.dataset, "val", year=args.year)
# dataset_val.prepare()
# print("Running voc inference on {} images.".format(args.limit))
# inference(model, dataset_val, int(args.limit))
# else:
# print("'{}' is not recognized. "
# "Use 'train' or 'inference'".format(args.command))
|
set
|
Set a value on a host.
@param hostKeyId: The key id for the destination host to set the
given key. This could be the local host, in which case the hostKey
will be the same as this C{Peer}'s keyStore keyId.
@param storagePath: The path to the key to set. For instance, this
could be something like /chat/<somekey>/inbox.
@param storageValue: The value to set.
|
from tint.ssl.context import PFSContextFactory
from tint.log import Logger
from tint.protocols.tintp import ConnectionPool
from tint.protocols.tintp import TintProtocolFactory
from tint.friends import FriendsList
class Peer(object):
def __init__(self, keyStore, storage, resolver):
self.keyStore = keyStore
self.storage = storage
self.contextFactory = PFSContextFactory(self.keyStore)
self.pool = ConnectionPool(resolver, self.contextFactory, self.keyStore, self.storage)
self.protocolFactory = TintProtocolFactory(self.pool)
self.friends = FriendsList(self.storage, self.keyStore, resolver)
self.log = Logger(system=self)
def getKeyId(self):
"""
Get the keyId used by this peer (this peer's identifier).
This is stored in the key store.
"""
return self.keyStore.getKeyId()
def getPublicKey(self):
"""
        Get the public key used by this peer.
This is stored in the key store.
"""
return self.keyStore.getPublicKey()
# MASKED: set function (lines 35-50)
def get(self, hostKeyId, storagePath):
"""
Get a value from a host.
@param hostKeyId: The key id for the destination host to get the
given key. This could be the local host, in which case the hostKey
will be the same as this C{Peer}'s keyStore keyId.
@param storagePath: The path to the key to get. For instance, this
could be something like /chat/<somekey>/inbox.
"""
if hostKeyId == self.getKeyId():
self.log.debug("getting storagePath %s on self" % storagePath)
return self.storage.get(hostKeyId, storagePath)
self.log.debug("getting storagePath %s on %s" % (storagePath, hostKeyId))
return self.pool.send(hostKeyId, 'get', storagePath)
def push(self, hostKeyId, storagePath, storageValue):
"""
Given key, create a new key at <key>/<id> with the given value, where <id>
is an auto-incrementing integer value starting at 0.
"""
if hostKeyId == self.getKeyId():
return self.storage.push(hostKeyId, storagePath, storageValue)
return self.pool.send(hostKeyId, 'push', storagePath, storageValue)
def ls(self, hostKeyId, storagePath, offset, length):
"""
Given key, get all children keys (with the given offset and length). Length cannot
be more than 1000.
"""
if hostKeyId == self.getKeyId():
return self.storage.ls(hostKeyId, storagePath, offset, length)
return self.pool.send(hostKeyId, 'ls', storagePath, offset, length)
|
def set(self, hostKeyId, storagePath, storageValue):
"""
Set a value on a host.
@param hostKeyId: The key id for the destination host to set the
given key. This could be the local host, in which case the hostKey
will be the same as this C{Peer}'s keyStore keyId.
@param storagePath: The path to the key to set. For instance, this
could be something like /chat/<somekey>/inbox.
@param storageValue: The value to set.
"""
if hostKeyId == self.getKeyId():
return self.storage.set(hostKeyId, storagePath, storageValue)
return self.pool.send(hostKeyId, 'set', storagePath, storageValue)
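# A hedged usage sketch (assumes `peer` was already constructed with working
# keyStore, storage and resolver objects; the paths and values are purely
# illustrative). Calls aimed at the peer's own key id hit local storage,
# while any other key id is dispatched over the connection pool.
def leave_note(peer, recipient_key_id, text):
    return peer.set(recipient_key_id, "/notes/latest", text)
def read_own_note(peer):
    return peer.get(peer.getKeyId(), "/notes/latest")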
| 35 | 50 |
from tint.ssl.context import PFSContextFactory
from tint.log import Logger
from tint.protocols.tintp import ConnectionPool
from tint.protocols.tintp import TintProtocolFactory
from tint.friends import FriendsList
class Peer(object):
def __init__(self, keyStore, storage, resolver):
self.keyStore = keyStore
self.storage = storage
self.contextFactory = PFSContextFactory(self.keyStore)
self.pool = ConnectionPool(resolver, self.contextFactory, self.keyStore, self.storage)
self.protocolFactory = TintProtocolFactory(self.pool)
self.friends = FriendsList(self.storage, self.keyStore, resolver)
self.log = Logger(system=self)
def getKeyId(self):
"""
Get the keyId used by this peer (this peer's identifier).
This is stored in the key store.
"""
return self.keyStore.getKeyId()
def getPublicKey(self):
"""
        Get the public key used by this peer.
This is stored in the key store.
"""
return self.keyStore.getPublicKey()
def set(self, hostKeyId, storagePath, storageValue):
"""
Set a value on a host.
@param hostKeyId: The key id for the destination host to set the
given key. This could be the local host, in which case the hostKey
will be the same as this C{Peer}'s keyStore keyId.
@param storagePath: The path to the key to set. For instance, this
could be something like /chat/<somekey>/inbox.
@param storageValue: The value to set.
"""
if hostKeyId == self.getKeyId():
return self.storage.set(hostKeyId, storagePath, storageValue)
return self.pool.send(hostKeyId, 'set', storagePath, storageValue)
def get(self, hostKeyId, storagePath):
"""
Get a value from a host.
@param hostKeyId: The key id for the destination host to get the
given key. This could be the local host, in which case the hostKey
will be the same as this C{Peer}'s keyStore keyId.
@param storagePath: The path to the key to get. For instance, this
could be something like /chat/<somekey>/inbox.
"""
if hostKeyId == self.getKeyId():
self.log.debug("getting storagePath %s on self" % storagePath)
return self.storage.get(hostKeyId, storagePath)
self.log.debug("getting storagePath %s on %s" % (storagePath, hostKeyId))
return self.pool.send(hostKeyId, 'get', storagePath)
def push(self, hostKeyId, storagePath, storageValue):
"""
Given key, create a new key at <key>/<id> with the given value, where <id>
is an auto-incrementing integer value starting at 0.
"""
if hostKeyId == self.getKeyId():
return self.storage.push(hostKeyId, storagePath, storageValue)
return self.pool.send(hostKeyId, 'push', storagePath, storageValue)
def ls(self, hostKeyId, storagePath, offset, length):
"""
Given key, get all children keys (with the given offset and length). Length cannot
be more than 1000.
"""
if hostKeyId == self.getKeyId():
return self.storage.ls(hostKeyId, storagePath, offset, length)
return self.pool.send(hostKeyId, 'ls', storagePath, offset, length)
|
_test_null_distribution_basic
|
Test if de.test.pairwise() generates a uniform p-value distribution
if it is given data simulated based on the null model. Returns the p-value
of the two-sided Kolmogorov-Smirnov test for equality of the observed
p-value distribution and a uniform distribution.
:param n_cells: Number of cells to simulate (number of observations per test).
:param n_genes: Number of genes to simulate (number of tests).
|
import logging
import unittest
import numpy as np
import pandas as pd
import scipy.stats as stats
import diffxpy.api as de
class _TestPairwiseNull:
noise_model: str
def _prepate_data(
self,
n_cells: int,
n_genes: int,
n_groups: int
):
if self.noise_model == "nb":
from batchglm.api.models.glm_nb import Simulator
rand_fn_loc = lambda shape: np.random.uniform(0.1, 1, shape)
rand_fn_scale = lambda shape: np.random.uniform(0.5, 1, shape)
elif self.noise_model == "norm" or self.noise_model is None:
from batchglm.api.models.glm_norm import Simulator
rand_fn_loc = lambda shape: np.random.uniform(500, 1000, shape)
rand_fn_scale = lambda shape: np.random.uniform(1, 2, shape)
else:
raise ValueError("noise model %s not recognized" % self.noise_model)
sim = Simulator(num_observations=n_cells, num_features=n_genes)
sim.generate_sample_description(num_batches=0, num_conditions=0)
sim.generate_params(
rand_fn_loc=rand_fn_loc,
rand_fn_scale=rand_fn_scale
)
sim.generate_data()
random_sample_description = pd.DataFrame({
"condition": np.random.randint(n_groups, size=sim.nobs)
})
return sim, random_sample_description
# MASKED: _test_null_distribution_basic function (lines 44-86)
class TestPairwiseNullStandard(unittest.TestCase, _TestPairwiseNull):
def test_null_distribution_ttest(self):
logging.getLogger("tensorflow").setLevel(logging.ERROR)
logging.getLogger("batchglm").setLevel(logging.WARNING)
logging.getLogger("diffxpy").setLevel(logging.WARNING)
np.random.seed(1)
self.noise_model = None
self._test_null_distribution_basic(test="t-test", lazy=False)
def test_null_distribution_rank(self):
logging.getLogger("tensorflow").setLevel(logging.ERROR)
logging.getLogger("batchglm").setLevel(logging.WARNING)
logging.getLogger("diffxpy").setLevel(logging.WARNING)
np.random.seed(1)
self.noise_model = None
self._test_null_distribution_basic(test="rank", lazy=False)
class TestPairwiseNullNb(unittest.TestCase, _TestPairwiseNull):
def test_null_distribution_ztest(self):
logging.getLogger("tensorflow").setLevel(logging.ERROR)
logging.getLogger("batchglm").setLevel(logging.WARNING)
logging.getLogger("diffxpy").setLevel(logging.WARNING)
np.random.seed(1)
self.noise_model = "nb"
self._test_null_distribution_basic(test="z-test", lazy=False, quick_scale=False)
self._test_null_distribution_basic(test="z-test", lazy=False, quick_scale=True)
def test_null_distribution_ztest_lazy(self):
logging.getLogger("tensorflow").setLevel(logging.ERROR)
logging.getLogger("batchglm").setLevel(logging.WARNING)
logging.getLogger("diffxpy").setLevel(logging.WARNING)
np.random.seed(1)
self.noise_model = "nb"
self._test_null_distribution_basic(test="z-test", lazy=True, quick_scale=False)
self._test_null_distribution_basic(test="z-test", lazy=True, quick_scale=True)
def test_null_distribution_wald(self):
logging.getLogger("tensorflow").setLevel(logging.ERROR)
logging.getLogger("batchglm").setLevel(logging.WARNING)
logging.getLogger("diffxpy").setLevel(logging.WARNING)
np.random.seed(1)
self.noise_model = "nb"
self._test_null_distribution_basic(test="wald", lazy=False, quick_scale=False)
self._test_null_distribution_basic(test="wald", lazy=False, quick_scale=True)
def test_null_distribution_lrt(self):
logging.getLogger("tensorflow").setLevel(logging.ERROR)
logging.getLogger("batchglm").setLevel(logging.WARNING)
logging.getLogger("diffxpy").setLevel(logging.WARNING)
np.random.seed(1)
self.noise_model = "nb"
self._test_null_distribution_basic(test="lrt", lazy=False, quick_scale=False)
if __name__ == '__main__':
unittest.main()
|
def _test_null_distribution_basic(
self,
test: str,
lazy: bool,
quick_scale: bool = False,
n_cells: int = 3000,
n_genes: int = 200,
n_groups: int = 3
):
"""
        Test if de.test.pairwise() generates a uniform p-value distribution
        if it is given data simulated based on the null model. Returns the p-value
        of the two-sided Kolmogorov-Smirnov test for equality of the observed
        p-value distribution and a uniform distribution.
:param n_cells: Number of cells to simulate (number of observations per test).
:param n_genes: Number of genes to simulate (number of tests).
"""
sim, sample_description = self._prepate_data(
n_cells=n_cells,
n_genes=n_genes,
n_groups=n_groups
)
test = de.test.pairwise(
data=sim.input_data,
sample_description=sample_description,
grouping="condition",
test=test,
lazy=lazy,
quick_scale=quick_scale,
noise_model=self.noise_model
)
_ = test.summary()
# Compare p-value distribution under null model against uniform distribution.
if lazy:
pval_h0 = stats.kstest(test.pval_pairs(groups0=0, groups1=1).flatten(), 'uniform').pvalue
else:
pval_h0 = stats.kstest(test.pval[0, 1, :].flatten(), 'uniform').pvalue
logging.getLogger("diffxpy").info('KS-test pvalue for null model match of wald(): %f' % pval_h0)
assert pval_h0 > 0.05, "KS-Test failed: pval_h0=%f is <= 0.05!" % np.round(pval_h0, 5)
return True
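# A self-contained sketch of the acceptance criterion used above: p-values
# drawn under the null model behave like uniform draws, so a two-sided KS
# test against the uniform distribution should not reject (the values here
# are simulated directly rather than produced by diffxpy).
import numpy as np
import scipy.stats as stats
_rng = np.random.RandomState(0)
_null_pvals = _rng.uniform(size=200)         # stand-in for test.pval[0, 1, :]
stats.kstest(_null_pvals, "uniform").pvalue  # expected to be well above 0.05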
| 44 | 86 |
import logging
import unittest
import numpy as np
import pandas as pd
import scipy.stats as stats
import diffxpy.api as de
class _TestPairwiseNull:
noise_model: str
def _prepate_data(
self,
n_cells: int,
n_genes: int,
n_groups: int
):
if self.noise_model == "nb":
from batchglm.api.models.glm_nb import Simulator
rand_fn_loc = lambda shape: np.random.uniform(0.1, 1, shape)
rand_fn_scale = lambda shape: np.random.uniform(0.5, 1, shape)
elif self.noise_model == "norm" or self.noise_model is None:
from batchglm.api.models.glm_norm import Simulator
rand_fn_loc = lambda shape: np.random.uniform(500, 1000, shape)
rand_fn_scale = lambda shape: np.random.uniform(1, 2, shape)
else:
raise ValueError("noise model %s not recognized" % self.noise_model)
sim = Simulator(num_observations=n_cells, num_features=n_genes)
sim.generate_sample_description(num_batches=0, num_conditions=0)
sim.generate_params(
rand_fn_loc=rand_fn_loc,
rand_fn_scale=rand_fn_scale
)
sim.generate_data()
random_sample_description = pd.DataFrame({
"condition": np.random.randint(n_groups, size=sim.nobs)
})
return sim, random_sample_description
def _test_null_distribution_basic(
self,
test: str,
lazy: bool,
quick_scale: bool = False,
n_cells: int = 3000,
n_genes: int = 200,
n_groups: int = 3
):
"""
        Test whether de.test.pairwise() with the requested test generates a uniform
        p-value distribution when given data simulated under the null model. Asserts
        that the p-value of the two-sided Kolmogorov-Smirnov test for equality of the
        observed p-value distribution and a uniform distribution exceeds 0.05.

        :param test: Name of the pairwise test to run ("t-test", "rank", "z-test", "wald" or "lrt").
        :param lazy: Whether to use the lazy pairwise implementation (p-values retrieved via pval_pairs()).
        :param quick_scale: Passed through to de.test.pairwise(); toggles the quick scale model fit.
        :param n_cells: Number of cells to simulate (number of observations per test).
        :param n_genes: Number of genes to simulate (number of tests).
        :param n_groups: Number of groups to simulate.
"""
sim, sample_description = self._prepate_data(
n_cells=n_cells,
n_genes=n_genes,
n_groups=n_groups
)
test = de.test.pairwise(
data=sim.input_data,
sample_description=sample_description,
grouping="condition",
test=test,
lazy=lazy,
quick_scale=quick_scale,
noise_model=self.noise_model
)
_ = test.summary()
# Compare p-value distribution under null model against uniform distribution.
if lazy:
pval_h0 = stats.kstest(test.pval_pairs(groups0=0, groups1=1).flatten(), 'uniform').pvalue
else:
pval_h0 = stats.kstest(test.pval[0, 1, :].flatten(), 'uniform').pvalue
logging.getLogger("diffxpy").info('KS-test pvalue for null model match of wald(): %f' % pval_h0)
assert pval_h0 > 0.05, "KS-Test failed: pval_h0=%f is <= 0.05!" % np.round(pval_h0, 5)
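        # Editorial note (not in the original source): with a 0.05 threshold, a
        # correctly calibrated test would still fail this assertion about 1 run
        # in 20 by chance; the fixed np.random.seed(1) calls in the unittest
        # methods below make the outcome deterministic instead.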
return True
class TestPairwiseNullStandard(unittest.TestCase, _TestPairwiseNull):
def test_null_distribution_ttest(self):
logging.getLogger("tensorflow").setLevel(logging.ERROR)
logging.getLogger("batchglm").setLevel(logging.WARNING)
logging.getLogger("diffxpy").setLevel(logging.WARNING)
np.random.seed(1)
self.noise_model = None
self._test_null_distribution_basic(test="t-test", lazy=False)
def test_null_distribution_rank(self):
logging.getLogger("tensorflow").setLevel(logging.ERROR)
logging.getLogger("batchglm").setLevel(logging.WARNING)
logging.getLogger("diffxpy").setLevel(logging.WARNING)
np.random.seed(1)
self.noise_model = None
self._test_null_distribution_basic(test="rank", lazy=False)
class TestPairwiseNullNb(unittest.TestCase, _TestPairwiseNull):
def test_null_distribution_ztest(self):
logging.getLogger("tensorflow").setLevel(logging.ERROR)
logging.getLogger("batchglm").setLevel(logging.WARNING)
logging.getLogger("diffxpy").setLevel(logging.WARNING)
np.random.seed(1)
self.noise_model = "nb"
self._test_null_distribution_basic(test="z-test", lazy=False, quick_scale=False)
self._test_null_distribution_basic(test="z-test", lazy=False, quick_scale=True)
def test_null_distribution_ztest_lazy(self):
logging.getLogger("tensorflow").setLevel(logging.ERROR)
logging.getLogger("batchglm").setLevel(logging.WARNING)
logging.getLogger("diffxpy").setLevel(logging.WARNING)
np.random.seed(1)
self.noise_model = "nb"
self._test_null_distribution_basic(test="z-test", lazy=True, quick_scale=False)
self._test_null_distribution_basic(test="z-test", lazy=True, quick_scale=True)
def test_null_distribution_wald(self):
logging.getLogger("tensorflow").setLevel(logging.ERROR)
logging.getLogger("batchglm").setLevel(logging.WARNING)
logging.getLogger("diffxpy").setLevel(logging.WARNING)
np.random.seed(1)
self.noise_model = "nb"
self._test_null_distribution_basic(test="wald", lazy=False, quick_scale=False)
self._test_null_distribution_basic(test="wald", lazy=False, quick_scale=True)
def test_null_distribution_lrt(self):
logging.getLogger("tensorflow").setLevel(logging.ERROR)
logging.getLogger("batchglm").setLevel(logging.WARNING)
logging.getLogger("diffxpy").setLevel(logging.WARNING)
np.random.seed(1)
self.noise_model = "nb"
self._test_null_distribution_basic(test="lrt", lazy=False, quick_scale=False)
if __name__ == '__main__':
unittest.main()
|
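Editorial aside (not part of the dataset record above): the file is normally run through its if __name__ == '__main__' block, which executes every test class. For a quicker check, a single case can be loaded by name with the standard unittest loader, as sketched below; the module name test_pairwise_null is an assumption for illustration only and is not given in this record.

import unittest

# Hypothetical module name -- adjust to wherever the file above has been saved.
suite = unittest.defaultTestLoader.loadTestsFromName(
    "test_pairwise_null.TestPairwiseNullNb.test_null_distribution_wald"
)
unittest.TextTestRunner(verbosity=2).run(suite)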